From 879279c9b46b5a9606dfca96075e005624f0785d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 24 Aug 2020 20:24:35 -0400 Subject: [PATCH] Introduce point in time APIs in x-pack basic (#61062) This commit introduces a new API that manages point-in-times in x-pack basic. An Elasticsearch PIT (point in time) is a lightweight view into the state of the data as it existed when initiated. A search request by default executes against the most recent point in time. In some cases, it is preferable to perform multiple search requests using the same point in time. For example, if refreshes happen between search_after requests, then the results of those requests might not be consistent, as changes happening between searches are only visible to the more recent point in time. A point in time must be opened before being used in search requests. The `keep_alive` parameter tells Elasticsearch how long it should keep a point in time around. ``` POST /my_index/_pit?keep_alive=1m ``` The response from the above request includes an `id`, which should be passed as the `id` of the `pit` parameter in search requests. ``` POST /_search { "query": { "match" : { "title" : "elasticsearch" } }, "pit": { "id": "46ToAwMDaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQNpZHkFdXVpZDIrBm5vZGVfMwAAAAAAAAAAKgFjA2lkeQV1dWlkMioGbm9kZV8yAAAAAAAAAAAMAWICBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", "keep_alive": "1m" } } ``` Point-in-times are automatically closed when their `keep_alive` has elapsed. However, keeping point-in-times has a cost; hence, point-in-times should be closed as soon as they are no longer used in search requests. ``` DELETE /_pit { "id" : "46ToAwMDaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQNpZHkFdXVpZDIrBm5vZGVfMwAAAAAAAAAAKgFjA2lkeQV1dWlkMioGbm9kZV8yAAAAAAAAAAAMAWIBBXV1aWQyAAA=" } ``` #### Notable works in this change: - Move the search state to the coordinating node: #52741 - Allow searches with a specific reader context: #53989 - Add the ability to acquire readers in IndexShard: #54966 Relates #46523 Relates #26472 Co-authored-by: Jim Ferenczi --- docs/reference/search/point-in-time.asciidoc | 116 +++ .../ParentChildInnerHitContextBuilder.java | 8 +- .../test/scroll/20_keep_alive.yml | 4 +- .../search/scroll/SearchScrollIT.java | 4 +- .../search/AbstractSearchAsyncAction.java | 35 +- .../action/search/ClearScrollController.java | 76 +- .../action/search/DfsQueryPhase.java | 23 +- .../action/search/ExpandSearchPhase.java | 12 +- .../action/search/FetchSearchPhase.java | 54 +- .../action/search/ParsedScrollId.java | 6 +- .../action/search/SearchContextId.java | 116 +++ ...rNode.java => SearchContextIdForNode.java} | 36 +- .../SearchDfsQueryThenFetchAsyncAction.java | 9 +- .../action/search/SearchPhaseContext.java | 13 +- .../SearchQueryThenFetchAsyncAction.java | 2 +- .../action/search/SearchRequest.java | 18 + .../action/search/SearchRequestBuilder.java | 11 + .../action/search/SearchResponse.java | 37 +- .../action/search/SearchResponseMerger.java | 3 +- .../search/SearchScrollAsyncAction.java | 22 +- .../action/search/SearchShardIterator.java | 25 + .../action/search/SearchTransportService.java | 31 +- .../search/TransportClearScrollAction.java | 8 +- .../action/search/TransportSearchAction.java | 285 ++++++-- .../action/search/TransportSearchHelper.java | 16 +- .../elasticsearch/client/node/NodeClient.java | 10 +- .../cluster/routing/OperationRouting.java | 5 + .../elasticsearch/index/engine/Engine.java | 115 ++- .../index/query/NestedQueryBuilder.java | 7 +- .../index/search/stats/ShardSearchStats.java | 11 +-
.../elasticsearch/index/shard/IndexShard.java | 17 +- .../index/shard/SearchOperationListener.java | 49 +- .../java/org/elasticsearch/node/Node.java | 5 +- .../rest/action/search/RestSearchAction.java | 21 +- .../search/DefaultSearchContext.java | 94 +-- .../elasticsearch/search/RescoreDocIds.java | 56 ++ .../search/SearchContextMissingException.java | 10 +- .../search/SearchPhaseResult.java | 26 +- .../elasticsearch/search/SearchService.java | 683 ++++++++++-------- .../search/aggregations/AggregationPhase.java | 2 - .../search/aggregations/AggregatorBase.java | 3 +- .../search/builder/SearchSourceBuilder.java | 115 ++- .../search/dfs/DfsSearchResult.java | 17 +- .../search/fetch/FetchSearchResult.java | 6 +- .../search/fetch/QueryFetchSearchResult.java | 4 +- .../search/fetch/ShardFetchRequest.java | 31 +- .../search/fetch/ShardFetchSearchRequest.java | 49 +- .../search/fetch/subphase/ExplainPhase.java | 2 - .../fetch/subphase/MatchedQueriesPhase.java | 3 - .../internal/FilteredSearchContext.java | 47 +- .../internal/InternalScrollSearchRequest.java | 8 +- .../search/internal/LegacyReaderContext.java | 112 +++ .../search/internal/ReaderContext.java | 203 ++++++ .../search/internal/ScrollContext.java | 25 - .../search/internal/SearchContext.java | 139 ++-- ...ntextId.java => ShardSearchContextId.java} | 9 +- .../search/internal/ShardSearchRequest.java | 60 +- .../search/internal/SubSearchContext.java | 21 - .../search/query/QueryPhase.java | 4 - .../search/query/QuerySearchRequest.java | 31 +- .../search/query/QuerySearchResult.java | 21 +- .../search/rescore/RescoreContext.java | 6 +- .../ElasticsearchExceptionTests.java | 4 +- .../ExceptionSerializationTests.java | 4 +- .../AbstractSearchAsyncActionTests.java | 33 +- .../search/ClearScrollControllerTests.java | 33 +- .../action/search/CountedCollectorTests.java | 5 +- .../action/search/DfsQueryPhaseTests.java | 48 +- .../action/search/FetchSearchPhaseTests.java | 67 +- .../action/search/MockSearchPhaseContext.java | 16 +- .../search/MultiSearchResponseTests.java | 10 +- .../action/search/SearchAsyncActionTests.java | 30 +- .../action/search/SearchContextIdTests.java | 98 +++ .../search/SearchPhaseControllerTests.java | 55 +- .../SearchQueryThenFetchAsyncActionTests.java | 6 +- .../action/search/SearchRequestTests.java | 33 + .../search/SearchScrollAsyncActionTests.java | 80 +- .../search/SearchScrollRequestTests.java | 4 +- .../search/TransportSearchActionTests.java | 27 +- .../search/TransportSearchHelperTests.java | 40 +- .../client/node/NodeClientHeadersTests.java | 4 +- .../elasticsearch/index/IndexModuleTests.java | 9 +- .../shard/SearchOperationListenerTests.java | 36 +- .../indices/cluster/ClusterStateChanges.java | 3 +- .../indices/RestValidateQueryActionTests.java | 4 +- .../search/DefaultSearchContextTests.java | 74 +- .../search/SearchServiceTests.java | 230 +++--- .../search/internal/ScrollContextTests.java | 36 - .../search/query/QueryPhaseTests.java | 10 +- .../search/query/QuerySearchResultTests.java | 17 +- .../snapshots/SnapshotResiliencyTests.java | 5 +- .../search/MockSearchService.java | 22 +- .../aggregations/AggregatorTestCase.java | 2 +- .../elasticsearch/test/TestSearchContext.java | 56 +- .../test/engine/MockInternalEngine.java | 6 + .../search/MockSearchServiceTests.java | 46 +- .../xpack/core/search/PointInTimeIT.java | 273 +++++++ .../elasticsearch/xpack/core/XPackPlugin.java | 10 + .../search/action/ClosePointInTimeAction.java | 19 + .../action/ClosePointInTimeRequest.java | 85 +++ 
.../action/ClosePointInTimeResponse.java | 22 + .../search/action/OpenPointInTimeAction.java | 18 + .../search/action/OpenPointInTimeRequest.java | 116 +++ .../action/OpenPointInTimeResponse.java | 48 ++ .../action/RestClosePointInTimeAction.java | 40 + .../action/RestOpenPointInTimeAction.java | 44 ++ .../TransportClosePointInTimeAction.java | 55 ++ .../TransportOpenPointInTimeAction.java | 168 +++++ .../AbstractEqlBlockingIntegTestCase.java | 4 +- .../index/engine/FrozenEngine.java | 481 ++---------- .../xpack/frozen/FrozenIndices.java | 9 - .../index/engine/FrozenEngineTests.java | 204 +++--- .../index/engine/FrozenIndexTests.java | 51 +- .../xpack/security/authz/RBACEngine.java | 3 + .../SecuritySearchOperationListener.java | 39 +- .../DocumentLevelSecurityTests.java | 43 ++ .../integration/FieldLevelSecurityTests.java | 54 +- .../SecuritySearchOperationListenerTests.java | 289 ++++---- .../api/close_point_in_time.json | 23 + .../rest-api-spec/api/open_point_in_time.json | 61 ++ .../test/search/point_in_time.yml | 173 +++++ 121 files changed, 4366 insertions(+), 2116 deletions(-) create mode 100644 docs/reference/search/point-in-time.asciidoc create mode 100644 server/src/main/java/org/elasticsearch/action/search/SearchContextId.java rename server/src/main/java/org/elasticsearch/action/search/{ScrollIdForNode.java => SearchContextIdForNode.java} (53%) create mode 100644 server/src/main/java/org/elasticsearch/search/RescoreDocIds.java create mode 100644 server/src/main/java/org/elasticsearch/search/internal/LegacyReaderContext.java create mode 100644 server/src/main/java/org/elasticsearch/search/internal/ReaderContext.java rename server/src/main/java/org/elasticsearch/search/internal/{SearchContextId.java => ShardSearchContextId.java} (89%) create mode 100644 server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java delete mode 100644 server/src/test/java/org/elasticsearch/search/internal/ScrollContextTests.java create mode 100644 x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/search/PointInTimeIT.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeResponse.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeResponse.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestClosePointInTimeAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestOpenPointInTimeAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportClosePointInTimeAction.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportOpenPointInTimeAction.java create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/close_point_in_time.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/open_point_in_time.json create mode 
100644 x-pack/plugin/src/test/resources/rest-api-spec/test/search/point_in_time.yml diff --git a/docs/reference/search/point-in-time.asciidoc b/docs/reference/search/point-in-time.asciidoc new file mode 100644 index 0000000000000..a79ca0f3ad4a7 --- /dev/null +++ b/docs/reference/search/point-in-time.asciidoc @@ -0,0 +1,116 @@ +[role="xpack"] +[testenv="basic"] +[[point-in-time]] +==== Point in time + +By default, a search request executes against the most recent visible data of +the target indices; this is called the point in time. An Elasticsearch PIT (point in time) +is a lightweight view into the state of the data as it existed when initiated. +In some cases, it's preferable to perform multiple search requests using +the same point in time. For example, if <> happen between +`search_after` requests, the results of those requests might not be consistent, as +changes happening between searches are only visible to the more recent point in time. + +A point in time must be opened explicitly before being used in search requests. The +`keep_alive` parameter tells Elasticsearch how long it should keep a point in time alive, +e.g. `?keep_alive=5m`. + +[source,console] +-------------------------------------------------- +POST /my-index-000001/_pit?keep_alive=1m +-------------------------------------------------- +// TEST[setup:my_index] + +The result from the above request includes an `id`, which should +be passed as the `id` of the `pit` parameter of a search request. + +[source,console] +-------------------------------------------------- +POST /_search <1> +{ + "size": 100, + "query": { + "match" : { + "title" : "elasticsearch" + } + }, + "pit": { + "id": "46ToAwMDaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQNpZHkFdXVpZDIrBm5vZGVfMwAAAAAAAAAAKgFjA2lkeQV1dWlkMioGbm9kZV8yAAAAAAAAAAAMAWICBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <2> + "keep_alive": "1m" <3> + } +} +-------------------------------------------------- +// TEST[catch:missing] + +<1> A search request with the `pit` parameter must not specify `index`, `routing`, +or {ref}/search-request-body.html#request-body-search-preference[`preference`], +as these parameters are copied from the point in time. +<2> The `id` parameter tells Elasticsearch to execute the request using contexts +from this point in time. +<3> The `keep_alive` parameter tells Elasticsearch how long it should extend +the time to live of the point in time. + +IMPORTANT: The open point in time request and each subsequent search request can +return a different `id`; thus always use the most recently received `id` for the +next search request. + +[[point-in-time-keep-alive]] +===== Keeping point in time alive +The `keep_alive` parameter, which is passed to an open point in time request and +to each search request, extends the time to live of the corresponding point in time. +The value (e.g. `1m`, see <>) does not need to be long enough to +process all data -- it just needs to be long enough for the next request (one such +follow-up request is sketched after the tip below). + +Normally, the background merge process optimizes the index by merging together +smaller segments to create new, bigger segments. Once the smaller segments are +no longer needed, they are deleted. However, open point-in-times prevent the +old segments from being deleted since they are still in use. + +TIP: Keeping older segments alive means that more disk space and file handles +are needed. Ensure that you have configured your nodes to have ample free file +handles. See <>.
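+The `keep_alive` renewal pairs naturally with `search_after` pagination: each page +repeats the query with the most recently received `id` and the sort values of the +last hit of the previous page. The following minimal sketch shows one such +follow-up request; the `id`, the `timestamp` sort field, and the `search_after` +value are illustrative only and not taken from this change. + +[source,console] +-------------------------------------------------- +POST /_search +{ + "size": 100, + "query": { + "match" : { + "title" : "elasticsearch" + } + }, + "pit": { + "id": "46ToAwMDaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQNpZHkFdXVpZDIrBm5vZGVfMwAAAAAAAAAAKgFjA2lkeQV1dWlkMioGbm9kZV8yAAAAAAAAAAAMAWICBXV1aWQyAAAFdXVpZDEAAQltYXRjaF9hbGw_gAAAAA==", <1> + "keep_alive": "1m" <2> + }, + "sort": [ + { "timestamp": "asc" } + ], + "search_after": [ 1589339914540 ] +} +-------------------------------------------------- +// TEST[catch:missing] + +<1> The `id` returned by the previous request; a real sequence would substitute the +latest received value here. +<2> Extends the time to live of the point in time by another minute.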
+ +Additionally, if a segment contains deleted or updated documents, then the +point in time must keep track of whether each document in the segment was live at +the time of the initial search request. Ensure that your nodes have sufficient heap +space if you have many open point-in-times on an index that is subject to ongoing +deletes or updates. + +You can check how many point-in-times (i.e., search contexts) are open with the +<>: + +[source,console] +--------------------------------------- +GET /_nodes/stats/indices/search +--------------------------------------- + +===== Close point in time API + +A point in time is automatically closed when its `keep_alive` has +elapsed. However, keeping point-in-times has a cost, as discussed in the +<>. Point-in-times should be closed +as soon as they are no longer used in search requests. + +[source,console] +--------------------------------------- +DELETE /_pit +{ + "id" : "46ToAwMDaWR4BXV1aWQxAgZub2RlXzEAAAAAAAAAAAEBYQNpZHkFdXVpZDIrBm5vZGVfMwAAAAAAAAAAKgFjA2lkeQV1dWlkMioGbm9kZV8yAAAAAAAAAAAMAWIBBXV1aWQyAAA=" +} +--------------------------------------- +// TEST[catch:missing] + +The API returns the following response: + +[source,console-result] +-------------------------------------------------- +{ + "succeeded": true, <1> + "num_freed": 3 <2> +} +-------------------------------------------------- +// TESTRESPONSE[s/"succeeded": true/"succeeded": $body.succeeded/] +// TESTRESPONSE[s/"num_freed": 3/"num_freed": $body.num_freed/] + +<1> If true, all search contexts associated with the point-in-time id have been successfully closed +<2> The number of search contexts that were successfully closed diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java index c37375e1ae4e6..3f438c5bcad20 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentChildInnerHitContextBuilder.java @@ -152,12 +152,8 @@ public TopDocsAndMaxScore topDocs(SearchHit hit) throws IOException { topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE); maxScoreCollector = new MaxScoreCollector(); } - try { - for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { - intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); - } - } finally { - clearReleasables(Lifetime.COLLECTION); + for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); } TopDocs topDocs = topDocsCollector.topDocs(from(), size()); float maxScore = Float.NaN; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml index e3b4dcc46230b..ab117eebbf607 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/scroll/20_keep_alive.yml @@ -34,7 +34,7 @@ search.max_keep_alive: "1m" - do: - catch: /.*Keep alive for scroll.*is too large.*/ + catch: /.*Keep alive for.*is too large.*/ search: rest_total_hits_as_int: true index: test_scroll @@ -61,7 +61,7 @@ - length: {hits.hits: 1 } - do: - catch: /.*Keep alive for scroll.*is too large.*/ +
catch: /.*Keep alive for.*is too large.*/ scroll: rest_total_hits_as_int: true scroll_id: $scroll_id diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java index 61a233cf287e7..f26c160895706 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java @@ -602,7 +602,7 @@ public void testInvalidScrollKeepAlive() throws IOException { IllegalArgumentException illegalArgumentException = (IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class); assertNotNull(illegalArgumentException); - assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (2h) is too large")); + assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for request (2h) is too large")); SearchResponse searchResponse = client().prepareSearch() .setQuery(matchAllQuery()) @@ -619,7 +619,7 @@ public void testInvalidScrollKeepAlive() throws IOException { illegalArgumentException = (IllegalArgumentException) ExceptionsHelper.unwrap(exc, IllegalArgumentException.class); assertNotNull(illegalArgumentException); - assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for scroll (3h) is too large")); + assertThat(illegalArgumentException.getMessage(), containsString("Keep alive for request (3h) is too large")); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 94499f60fc408..c5e1564b5ee2a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -24,6 +24,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.ShardOperationFailedException; @@ -163,7 +164,7 @@ public final void start() { // total hits is null in the response if the tracking of total hits is disabled boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; listener.onResponse(new SearchResponse(InternalSearchResponse.empty(withTotalHits), null, 0, 0, 0, buildTookInMillis(), - ShardSearchFailure.EMPTY_ARRAY, clusters)); + ShardSearchFailure.EMPTY_ARRAY, clusters, null)); return; } executePhase(this); @@ -514,22 +515,29 @@ public final SearchRequest getRequest() { return request; } - protected final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, - String scrollId, - ShardSearchFailure[] failures) { + protected final SearchResponse buildSearchResponse(InternalSearchResponse internalSearchResponse, ShardSearchFailure[] failures, + String scrollId, String searchContextId) { return new SearchResponse(internalSearchResponse, scrollId, getNumShards(), successfulOps.get(), - skippedOps.get(), buildTookInMillis(), failures, clusters); + skippedOps.get(), buildTookInMillis(), failures, clusters, searchContextId); + } + + boolean includeSearchContextInResponse() { + return request.pointInTimeBuilder() != null; } @Override - 
public void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (allowPartialResults == false && failures.length > 0){ + if (request.pointInTimeBuilder() == null && allowPartialResults == false && failures.length > 0) { raisePhaseFailure(new SearchPhaseExecutionException("", "Shard failures", null, failures)); } else { - listener.onResponse(buildSearchResponse(internalSearchResponse, scrollId, failures)); + final Version minNodeVersion = clusterState.nodes().getMinNodeVersion(); + final String scrollId = request.scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, minNodeVersion) : null; + final String searchContextId = + includeSearchContextInResponse() ? SearchContextId.encode(queryResults.asList(), aliasFilter, minNodeVersion) : null; + listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId)); } } @@ -598,12 +606,13 @@ public final ShardSearchRequest buildShardSearchRequest(SearchShardIterator shar final String[] routings = indexRoutings.getOrDefault(indexName, Collections.emptySet()) .toArray(new String[0]); ShardSearchRequest shardRequest = new ShardSearchRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(), - filter, indexBoost, timeProvider.getAbsoluteStartMillis(), shardIt.getClusterAlias(), routings); + filter, indexBoost, timeProvider.getAbsoluteStartMillis(), shardIt.getClusterAlias(), routings, + shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive()); // if we already received a search result we can inform the shard that it // can return a null response if the request rewrites to match none rather // than creating an empty response in the search thread pool. - // Note that, we have to disable this shortcut for scroll queries. - shardRequest.canReturnNullResponseIfMatchNoDocs(hasShardResponse.get() && request.scroll() == null); + // Note that, we have to disable this shortcut for queries that create a context (scroll and search context). 
+ shardRequest.canReturnNullResponseIfMatchNoDocs(hasShardResponse.get() && shardRequest.scroll() == null); return shardRequest; } @@ -673,8 +682,4 @@ private synchronized Runnable tryQueue(Runnable runnable) { return toExecute; } } - - protected ClusterState clusterState() { - return clusterState; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java index d0abf798501b3..d79d7a3013062 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollController.java @@ -21,20 +21,28 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.StepListener; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportResponse; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; +import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiFunction; +import java.util.stream.Collectors; import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId; -final class ClearScrollController implements Runnable { +public final class ClearScrollController implements Runnable { private final DiscoveryNodes nodes; private final SearchTransportService searchTransportService; private final CountDown expectedOps; @@ -56,19 +64,18 @@ final class ClearScrollController implements Runnable { expectedOps = nodes.getSize(); runner = this::cleanAllScrolls; } else { - List parsedScrollIds = new ArrayList<>(); - for (String parsedScrollId : request.getScrollIds()) { - ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext(); - for (ScrollIdForNode id : context) { - parsedScrollIds.add(id); - } + // TODO: replace this with #closeContexts + List contexts = new ArrayList<>(); + for (String scrollId : request.getScrollIds()) { + SearchContextIdForNode[] context = parseScrollId(scrollId).getContext(); + Collections.addAll(contexts, context); } - if (parsedScrollIds.isEmpty()) { + if (contexts.isEmpty()) { expectedOps = 0; runner = () -> listener.onResponse(new ClearScrollResponse(true, 0)); } else { - expectedOps = parsedScrollIds.size(); - runner = () -> cleanScrollIds(parsedScrollIds); + expectedOps = contexts.size(); + runner = () -> cleanScrollIds(contexts); } } this.expectedOps = new CountDown(expectedOps); @@ -101,17 +108,17 @@ public void onFailure(Exception e) { } } - void cleanScrollIds(List parsedScrollIds) { - SearchScrollAsyncAction.collectNodesAndRun(parsedScrollIds, nodes, searchTransportService, ActionListener.wrap( + void cleanScrollIds(List contextIds) { + SearchScrollAsyncAction.collectNodesAndRun(contextIds, nodes, searchTransportService, ActionListener.wrap( lookup -> { - for (ScrollIdForNode target : parsedScrollIds) { + for (SearchContextIdForNode target : contextIds) { final DiscoveryNode node = lookup.apply(target.getClusterAlias(), target.getNode()); if (node == null) { 
onFreedContext(false); } else { try { Transport.Connection connection = searchTransportService.getConnection(target.getClusterAlias(), node); - searchTransportService.sendFreeContext(connection, target.getContextId(), + searchTransportService.sendFreeContext(connection, target.getSearchContextId(), ActionListener.wrap(freed -> onFreedContext(freed.isFreed()), e -> onFailedFreedContext(e, node))); } catch (Exception e) { onFailedFreedContext(e, node); @@ -142,4 +149,45 @@ private void onFailedFreedContext(Throwable e, DiscoveryNode node) { listener.onResponse(new ClearScrollResponse(false, freedSearchContexts.get())); } } + + /** + * Closes the given context id and reports the number of freed contexts via the listener + */ + public static void closeContexts(DiscoveryNodes nodes, SearchTransportService searchTransportService, + Collection contextIds, + ActionListener listener) { + if (contextIds.isEmpty()) { + listener.onResponse(0); + return; + } + final Set clusters = contextIds.stream() + .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) + .map(SearchContextIdForNode::getClusterAlias).collect(Collectors.toSet()); + final StepListener> lookupListener = new StepListener<>(); + if (clusters.isEmpty() == false) { + searchTransportService.getRemoteClusterService().collectNodes(clusters, lookupListener); + } else { + lookupListener.onResponse((cluster, nodeId) -> nodes.get(nodeId)); + } + lookupListener.whenComplete(nodeLookup -> { + final GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.delegateFailure(listener, (l, rs) -> l.onResponse(Math.toIntExact(rs.stream().filter(r -> r).count()))), + contextIds.size() + ); + for (SearchContextIdForNode contextId : contextIds) { + final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); + if (node == null) { + groupedListener.onResponse(false); + } else { + try { + final Transport.Connection connection = searchTransportService.getConnection(contextId.getClusterAlias(), node); + searchTransportService.sendFreeContext(connection, contextId.getSearchContextId(), + ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false))); + } catch (Exception e) { + groupedListener.onResponse(false); + } + } + } + }, listener::onFailure); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index a37fbb0e14d4c..82e5d002bee1f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.AggregatedDfs; @@ -42,14 +41,15 @@ */ final class DfsQueryPhase extends SearchPhase { private final ArraySearchPhaseResults queryResult; - private final SearchPhaseController searchPhaseController; - private final AtomicArray dfsSearchResults; + private final List searchResults; + private final AggregatedDfs dfs; private final Function, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final SearchTransportService searchTransportService; private final SearchProgressListener progressListener; - 
DfsQueryPhase(AtomicArray dfsSearchResults, + DfsQueryPhase(List searchResults, + AggregatedDfs dfs, SearchPhaseController searchPhaseController, Function, SearchPhase> nextPhaseFactory, SearchPhaseContext context, Consumer onPartialMergeFailure) { @@ -57,8 +57,8 @@ final class DfsQueryPhase extends SearchPhase { this.progressListener = context.getTask().getProgressListener(); this.queryResult = searchPhaseController.newSearchPhaseResults(context, progressListener, context.getRequest(), context.getNumShards(), onPartialMergeFailure); - this.searchPhaseController = searchPhaseController; - this.dfsSearchResults = dfsSearchResults; + this.searchResults = searchResults; + this.dfs = dfs; this.nextPhaseFactory = nextPhaseFactory; this.context = context; this.searchTransportService = context.getSearchTransport(); @@ -68,16 +68,15 @@ final class DfsQueryPhase extends SearchPhase { public void run() throws IOException { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early - final List resultList = dfsSearchResults.asList(); - final AggregatedDfs dfs = searchPhaseController.aggregateDfs(resultList); - final CountedCollector counter = new CountedCollector<>(queryResult, - resultList.size(), + final CountedCollector counter = new CountedCollector<>( + queryResult, + searchResults.size(), () -> context.executeNextPhase(this, nextPhaseFactory.apply(queryResult)), context); - for (final DfsSearchResult dfsResult : resultList) { + for (final DfsSearchResult dfsResult : searchResults) { final SearchShardTarget searchShardTarget = dfsResult.getSearchShardTarget(); Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); QuerySearchRequest querySearchRequest = new QuerySearchRequest(searchShardTarget.getOriginalIndices(), - dfsResult.getContextId(), dfs); + dfsResult.getContextId(), dfsResult.getShardSearchRequest(), dfs); final int shardIndex = dfsResult.getShardIndex(); searchTransportService.sendExecuteQuery(connection, querySearchRequest, context.getTask(), new SearchActionListener(searchShardTarget, shardIndex) { diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index f667606917b07..cffbf7ea0a072 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -20,12 +20,14 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.internal.InternalSearchResponse; @@ -42,13 +44,13 @@ final class ExpandSearchPhase extends SearchPhase { private final SearchPhaseContext context; private final InternalSearchResponse searchResponse; - private final String scrollId; + private final AtomicArray queryResults; - 
ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, String scrollId) { + ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, AtomicArray queryResults) { super("expand"); this.context = context; this.searchResponse = searchResponse; - this.scrollId = scrollId; + this.queryResults = queryResults; } /** @@ -110,11 +112,11 @@ public void run() { hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); } } - context.sendSearchResponse(searchResponse, scrollId); + context.sendSearchResponse(searchResponse, queryResults); }, context::onFailure) ); } else { - context.sendSearchResponse(searchResponse, scrollId); + context.sendSearchResponse(searchResponse, queryResults); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 4b2abcf271c83..613761871f4a1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -22,17 +22,18 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.search.ScoreDoc; -import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -47,26 +48,26 @@ final class FetchSearchPhase extends SearchPhase { private final ArraySearchPhaseResults fetchResults; private final SearchPhaseController searchPhaseController; private final AtomicArray queryResults; - private final BiFunction nextPhaseFactory; + private final BiFunction, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; private final SearchPhaseResults resultConsumer; private final SearchProgressListener progressListener; - private final ClusterState clusterState; + private final AggregatedDfs aggregatedDfs; FetchSearchPhase(SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, - SearchPhaseContext context, - ClusterState clusterState) { - this(resultConsumer, searchPhaseController, context, clusterState, - (response, scrollId) -> new ExpandSearchPhase(context, response, scrollId)); + AggregatedDfs aggregatedDfs, + SearchPhaseContext context) { + this(resultConsumer, searchPhaseController, aggregatedDfs, context, + (response, queryPhaseResults) -> new ExpandSearchPhase(context, response, queryPhaseResults)); } FetchSearchPhase(SearchPhaseResults resultConsumer, SearchPhaseController searchPhaseController, + AggregatedDfs aggregatedDfs, SearchPhaseContext context, - ClusterState clusterState, - BiFunction 
nextPhaseFactory) { + BiFunction, SearchPhase> nextPhaseFactory) { super("fetch"); if (context.getNumShards() != resultConsumer.getNumShards()) { throw new IllegalStateException("number of shards must match the length of the query results but doesn't:" @@ -75,12 +76,12 @@ final class FetchSearchPhase extends SearchPhase { this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); this.searchPhaseController = searchPhaseController; this.queryResults = resultConsumer.getAtomicArray(); + this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; this.context = context; this.logger = context.getLogger(); this.resultConsumer = resultConsumer; this.progressListener = context.getTask().getProgressListener(); - this.clusterState = clusterState; } @Override @@ -105,17 +106,10 @@ private void innerRun() throws Exception { final int numShards = context.getNumShards(); final boolean isScrollSearch = context.getRequest().scroll() != null; final List phaseResults = queryResults.asList(); - final String scrollId; - if (isScrollSearch) { - final boolean includeContextUUID = clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_7_7_0); - scrollId = TransportSearchHelper.buildScrollId(queryResults, includeContextUUID); - } else { - scrollId = null; - } final SearchPhaseController.ReducedQueryPhase reducedQueryPhase = resultConsumer.reduce(); final boolean queryAndFetchOptimization = queryResults.length() == 1; final Runnable finishPhase = () - -> moveToNextPhase(searchPhaseController, scrollId, reducedQueryPhase, queryAndFetchOptimization ? + -> moveToNextPhase(searchPhaseController, queryResults, reducedQueryPhase, queryAndFetchOptimization ? queryResults : fetchResults.getAtomicArray()); if (queryAndFetchOptimization) { assert phaseResults.isEmpty() || phaseResults.get(0).fetchResult() != null : "phaseResults empty [" + phaseResults.isEmpty() @@ -157,7 +151,8 @@ private void innerRun() throws Exception { Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().getContextId(), i, entry, - lastEmittedDocPerShard, searchShardTarget.getOriginalIndices()); + lastEmittedDocPerShard, searchShardTarget.getOriginalIndices(), queryResult.getShardSearchRequest(), + queryResult.getRescoreDocIds()); executeFetch(i, searchShardTarget, counter, fetchSearchRequest, queryResult.queryResult(), connection); } @@ -166,10 +161,12 @@ private void innerRun() throws Exception { } } - protected ShardFetchSearchRequest createFetchRequest(SearchContextId contextId, int index, IntArrayList entry, - ScoreDoc[] lastEmittedDocPerShard, OriginalIndices originalIndices) { + protected ShardFetchSearchRequest createFetchRequest(ShardSearchContextId contextId, int index, IntArrayList entry, + ScoreDoc[] lastEmittedDocPerShard, OriginalIndices originalIndices, + ShardSearchRequest shardSearchRequest, RescoreDocIds rescoreDocIds) { final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? 
lastEmittedDocPerShard[index] : null; - return new ShardFetchSearchRequest(originalIndices, contextId, entry, lastEmittedDoc); + return new ShardFetchSearchRequest(originalIndices, contextId, shardSearchRequest, entry, lastEmittedDoc, + rescoreDocIds, aggregatedDfs); } private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, @@ -211,7 +208,9 @@ public void onFailure(Exception e) { private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { // we only release search context that we did not fetch from if we are not scrolling // and if it has at lease one hit that didn't make it to the global topDocs - if (context.getRequest().scroll() == null && queryResult.hasSearchContext()) { + if (context.getRequest().scroll() == null && + context.getRequest().pointInTimeBuilder() == null && + queryResult.hasSearchContext()) { try { SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget(); Transport.Connection connection = context.getConnection(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()); @@ -223,10 +222,11 @@ private void releaseIrrelevantSearchContext(QuerySearchResult queryResult) { } private void moveToNextPhase(SearchPhaseController searchPhaseController, - String scrollId, SearchPhaseController.ReducedQueryPhase reducedQueryPhase, + AtomicArray queryPhaseResults, + SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr) { final InternalSearchResponse internalResponse = searchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr.asList(), fetchResultsArr::get); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, scrollId)); + context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryPhaseResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java index b588827867fbb..43ae39669606b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java +++ b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java @@ -29,9 +29,9 @@ class ParsedScrollId { private final String type; - private final ScrollIdForNode[] context; + private final SearchContextIdForNode[] context; - ParsedScrollId(String source, String type, ScrollIdForNode[] context) { + ParsedScrollId(String source, String type, SearchContextIdForNode[] context) { this.source = source; this.type = type; this.context = context; @@ -45,7 +45,7 @@ public String getType() { return type; } - public ScrollIdForNode[] getContext() { + public SearchContextIdForNode[] getContext() { return context; } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java new file mode 100644 index 0000000000000..cd3e43e05083b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.transport.RemoteClusterAware; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class SearchContextId { + private final Map shards; + private final Map aliasFilter; + + private SearchContextId(Map shards, Map aliasFilter) { + this.shards = shards; + this.aliasFilter = aliasFilter; + } + + public Map shards() { + return shards; + } + + public Map aliasFilter() { + return aliasFilter; + } + + public static String encode(List searchPhaseResults, Map aliasFilter, Version version) { + final Map shards = new HashMap<>(); + for (SearchPhaseResult searchPhaseResult : searchPhaseResults) { + final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); + shards.put(target.getShardId(), + new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId())); + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(version); + Version.writeVersion(version, out); + out.writeMap(shards, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + out.writeMap(aliasFilter, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + return Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(out.bytes())); + } catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + + public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, String id) { + final ByteBuffer byteBuffer; + try { + byteBuffer = ByteBuffer.wrap(Base64.getUrlDecoder().decode(id)); + } catch (Exception e) { + throw new IllegalArgumentException("invalid id: [" + id + "]", e); + } + try (StreamInput in = new NamedWriteableAwareStreamInput(new ByteBufferStreamInput(byteBuffer), namedWriteableRegistry)) { + final Version version = Version.readVersion(in); + in.setVersion(version); + final Map shards = in.readMap(ShardId::new, SearchContextIdForNode::new); + final Map aliasFilters = in.readMap(StreamInput::readString, AliasFilter::new); + if (in.available() > 0) { + throw new IllegalArgumentException("Not all bytes were read"); + } + return new SearchContextId(Collections.unmodifiableMap(shards), Collections.unmodifiableMap(aliasFilters)); + 
} catch (IOException e) { + throw new IllegalArgumentException(e); + } + } + + public String[] getActualIndices() { + final Set<String> indices = new HashSet<>(); + for (Map.Entry<ShardId, SearchContextIdForNode> entry : shards().entrySet()) { + final String indexName = entry.getKey().getIndexName(); + final String clusterAlias = entry.getValue().getClusterAlias(); + if (Strings.isEmpty(clusterAlias)) { + indices.add(indexName); + } else { + indices.add(clusterAlias + RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR + indexName); + } + } + return indices.toArray(String[]::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java similarity index 53% rename from server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java rename to server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java index d69a10334bd78..e5804e92082dc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextIdForNode.java @@ -20,17 +20,35 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.Nullable; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.internal.ShardSearchContextId; -class ScrollIdForNode { +import java.io.IOException; + +public final class SearchContextIdForNode implements Writeable { private final String node; - private final SearchContextId contextId; + private final ShardSearchContextId searchContextId; private final String clusterAlias; - ScrollIdForNode(@Nullable String clusterAlias, String node, SearchContextId contextId) { + SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; - this.contextId = contextId; + this.searchContextId = searchContextId; + } + + SearchContextIdForNode(StreamInput in) throws IOException { + this.node = in.readString(); + this.clusterAlias = in.readOptionalString(); + this.searchContextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(node); + out.writeOptionalString(clusterAlias); + searchContextId.writeTo(out); } public String getNode() { @@ -42,15 +60,15 @@ public String getClusterAlias() { return clusterAlias; } - public SearchContextId getContextId() { - return contextId; + public ShardSearchContextId getSearchContextId() { + return searchContextId; } @Override public String toString() { - return "ScrollIdForNode{" + + return "SearchContextIdForNode{" + "node='" + node + '\'' + - ", scrollId=" + contextId + + ", searchContextId=" + searchContextId + ", clusterAlias='" + clusterAlias + '\'' + '}'; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 6c55e6fe33266..26a91430fc913 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -25,10 +25,12 @@ import org.elasticsearch.cluster.routing.GroupShardsIterator; import
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.transport.Transport; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Executor; @@ -71,7 +73,10 @@ protected void executePhaseOnShard(final SearchShardIterator shardIt, final Shar @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, final SearchPhaseContext context) { - return new DfsQueryPhase(results.getAtomicArray(), searchPhaseController, (queryResults) -> - new FetchSearchPhase(queryResults, searchPhaseController, context, clusterState()), context, onPartialMergeFailure); + final List dfsSearchResults = results.getAtomicArray().asList(); + final AggregatedDfs aggregatedDfs = searchPhaseController.aggregateDfs(dfsSearchResults); + + return new DfsQueryPhase(dfsSearchResults, aggregatedDfs, searchPhaseController, (queryResults) -> + new FetchSearchPhase(queryResults, searchPhaseController, aggregatedDfs, context), context, onPartialMergeFailure); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index e22104b8f70af..75ce64dc264eb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -21,9 +21,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; @@ -57,10 +59,11 @@ interface SearchPhaseContext extends Executor { /** * Builds and sends the final search response back to the user. 
+ * * @param internalSearchResponse the internal search response - * @param scrollId an optional scroll ID if this search is a scroll search + * @param queryResults the results of the query phase */ - void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId); + void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults); /** * Notifies the top-level listener of the provided exception @@ -101,7 +104,9 @@ interface SearchPhaseContext extends Executor { * @see org.elasticsearch.search.fetch.FetchSearchResult#getContextId() * */ - default void sendReleaseSearchContext(SearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { + default void sendReleaseSearchContext(ShardSearchContextId contextId, + Transport.Connection connection, + OriginalIndices originalIndices) { if (connection != null) { getSearchTransport().sendFreeContext(connection, contextId, originalIndices); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index c282628b0d041..88e3edfc8ed46 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -110,7 +110,7 @@ && getRequest().scroll() == null @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, final SearchPhaseContext context) { - return new FetchSearchPhase(results, searchPhaseController, context, clusterState()); + return new FetchSearchPhase(results, searchPhaseController, null, context); } private ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 5b41bf6896b3d..70066394ce11a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -275,6 +275,17 @@ public ActionRequestValidationException validate() { validationException = source.aggregations().validate(validationException); } } + if (pointInTimeBuilder() != null) { + if (scroll) { + validationException = addValidationError("using [point in time] is not allowed in a scroll context", validationException); + } + if (routing() != null) { + validationException = addValidationError("[routing] cannot be used with point in time", validationException); + } + if (preference() != null) { + validationException = addValidationError("[preference] cannot be used with point in time", validationException); + } + } return validationException; } @@ -429,6 +440,13 @@ public SearchSourceBuilder source() { return source; } + public SearchSourceBuilder.PointInTimeBuilder pointInTimeBuilder() { + if (source != null) { + return source.pointInTimeBuilder(); + } + return null; + } + /** * The tye of search to execute. 
*/ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 42a56b308dafd..ae6b6ec829d05 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -542,6 +542,17 @@ public SearchRequestBuilder setCollapse(CollapseBuilder collapse) { return this; } + /** + * Specifies the search context that Elasticsearch should use to perform the query + * + * @param searchContextId the base64 encoded string of the search context id + * @param keepAlive the extended time to live for the search context + */ + public SearchRequestBuilder setSearchContext(String searchContextId, TimeValue keepAlive) { + sourceBuilder().pointInTimeBuilder(new SearchSourceBuilder.PointInTimeBuilder(searchContextId, keepAlive)); + return this; + } + @Override public String toString() { if (request.source() != null) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 80487710729c9..d832901be62bd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -61,6 +62,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentObject { private static final ParseField SCROLL_ID = new ParseField("_scroll_id"); + private static final ParseField POINT_IN_TIME_ID = new ParseField("pit_id"); private static final ParseField TOOK = new ParseField("took"); private static final ParseField TIMED_OUT = new ParseField("timed_out"); private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); @@ -68,6 +70,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb private final SearchResponseSections internalResponse; private final String scrollId; + private final String pointInTimeId; private final int totalShards; private final int successfulShards; private final int skippedShards; @@ -93,12 +96,24 @@ public SearchResponse(StreamInput in) throws IOException { scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); skippedShards = in.readVInt(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + pointInTimeId = in.readOptionalString(); + } else { + pointInTimeId = null; + } } public SearchResponse(SearchResponseSections internalResponse, String scrollId, int totalShards, int successfulShards, int skippedShards, long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters) { + this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + } + + public SearchResponse(SearchResponseSections internalResponse, String scrollId, int totalShards, int successfulShards, + int skippedShards, long tookInMillis, ShardSearchFailure[] shardFailures, Clusters clusters, + String pointInTimeId) { this.internalResponse = internalResponse; this.scrollId = scrollId; + this.pointInTimeId = pointInTimeId; this.clusters = clusters; this.totalShards = totalShards; 
this.successfulShards = successfulShards; @@ -106,6 +121,8 @@ public SearchResponse(SearchResponseSections internalResponse, String scrollId, this.tookInMillis = tookInMillis; this.shardFailures = shardFailures; assert skippedShards <= totalShards : "skipped: " + skippedShards + " total: " + totalShards; + assert scrollId == null || pointInTimeId == null : + "SearchResponse can't have both scrollId [" + scrollId + "] and searchContextId [" + pointInTimeId + "]"; } @Override @@ -207,6 +224,13 @@ public String getScrollId() { return scrollId; } + /** + * Returns the encoded string of the search context that the search request used to execute + */ + public String pointInTimeId() { + return pointInTimeId; + } + /** * If profiling was enabled, this returns an object containing the profile results from * each shard. If profiling was not enabled, this will return null @@ -239,6 +263,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (scrollId != null) { builder.field(SCROLL_ID.getPreferredName(), scrollId); } + if (pointInTimeId != null) { + builder.field(POINT_IN_TIME_ID.getPreferredName(), pointInTimeId); + } builder.field(TOOK.getPreferredName(), tookInMillis); builder.field(TIMED_OUT.getPreferredName(), isTimedOut()); if (isTerminatedEarly() != null) { @@ -275,6 +302,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE int totalShards = -1; int skippedShards = 0; // 0 for BWC String scrollId = null; + String searchContextId = null; List failures = new ArrayList<>(); Clusters clusters = Clusters.EMPTY; for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { @@ -283,6 +311,8 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } else if (token.isValue()) { if (SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) { scrollId = parser.text(); + } else if (POINT_IN_TIME_ID.match(currentFieldName, parser.getDeprecationHandler())) { + searchContextId = parser.text(); } else if (TOOK.match(currentFieldName, parser.getDeprecationHandler())) { tookInMillis = parser.longValue(); } else if (TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) { @@ -361,7 +391,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE SearchResponseSections searchResponseSections = new SearchResponseSections(hits, aggs, suggest, timedOut, terminatedEarly, profile, numReducePhases); return new SearchResponse(searchResponseSections, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, - failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters); + failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId); } @Override @@ -378,6 +408,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); out.writeVInt(skippedShards); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalString(pointInTimeId); + } } @Override @@ -486,6 +519,6 @@ static SearchResponse empty(Supplier tookInMillisSupplier, Clusters cluste InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, InternalAggregations.EMPTY, null, null, false, null, 0); return new SearchResponse(internalSearchResponse, null, 0, 0, 0, tookInMillisSupplier.get(), - ShardSearchFailure.EMPTY_ARRAY, clusters); + ShardSearchFailure.EMPTY_ARRAY, clusters, null); } } diff --git
a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 5c1cacab559fd..e28226b77ac8d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -203,7 +203,8 @@ SearchResponse getMergedResponse(Clusters clusters) { InternalSearchResponse response = new InternalSearchResponse(mergedSearchHits, reducedAggs, suggest, profileShardResults, topDocsStats.timedOut, topDocsStats.terminatedEarly, numReducePhases); long tookInMillis = searchTimeProvider.buildTookInMillis(); - return new SearchResponse(response, null, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters); + return new SearchResponse(response, null, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, + clusters, null); } private static final Comparator FAILURES_COMPARATOR = new Comparator() { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 83ca45b002893..1db433278620d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -31,7 +31,7 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; @@ -104,7 +104,7 @@ private long buildTookInMillis() { } public final void run() { - final ScrollIdForNode[] context = scrollId.getContext(); + final SearchContextIdForNode[] context = scrollId.getContext(); if (context.length == 0) { listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY)); } else { @@ -117,11 +117,11 @@ public final void run() { * This method collects nodes from the remote clusters asynchronously if any of the scroll IDs references a remote cluster. * Otherwise the action listener will be invoked immediately with a function based on the given discovery nodes. 
*/ - static void collectNodesAndRun(final Iterable scrollIds, DiscoveryNodes nodes, + static void collectNodesAndRun(final Iterable scrollIds, DiscoveryNodes nodes, SearchTransportService searchTransportService, ActionListener> listener) { Set clusters = new HashSet<>(); - for (ScrollIdForNode target : scrollIds) { + for (SearchContextIdForNode target : scrollIds) { if (target.getClusterAlias() != null) { clusters.add(target.getClusterAlias()); } @@ -135,10 +135,10 @@ static void collectNodesAndRun(final Iterable scrollIds, Discov } } - private void run(BiFunction clusterNodeLookup, final ScrollIdForNode[] context) { + private void run(BiFunction clusterNodeLookup, final SearchContextIdForNode[] context) { final CountDown counter = new CountDown(scrollId.getContext().length); for (int i = 0; i < context.length; i++) { - ScrollIdForNode target = context[i]; + SearchContextIdForNode target = context[i]; final int shardIndex = i; final Transport.Connection connection; try { @@ -148,11 +148,11 @@ private void run(BiFunction clusterNodeLookup, fi } connection = getConnection(target.getClusterAlias(), node); } catch (Exception ex) { - onShardFailure("query", counter, target.getContextId(), + onShardFailure("query", counter, target.getSearchContextId(), ex, null, () -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup)); continue; } - final InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(target.getContextId(), request); + final InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(target.getSearchContextId(), request); // we can't create a SearchShardTarget here since we don't know the index and shard ID we are talking to // we only know the node and the search context ID. Yet, the response will contain the SearchShardTarget // from the target node instead...that's why we pass null here @@ -192,7 +192,7 @@ protected void innerOnResponse(T result) { @Override public void onFailure(Exception t) { - onShardFailure("query", counter, target.getContextId(), t, null, + onShardFailure("query", counter, target.getSearchContextId(), t, null, () -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup)); } }; @@ -242,13 +242,13 @@ protected final void sendResponse(SearchPhaseController.ReducedQueryPhase queryP scrollId = request.scrollId(); } listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(), - 0, buildTookInMillis(), buildShardFailures(), SearchResponse.Clusters.EMPTY)); + 0, buildTookInMillis(), buildShardFailures(), SearchResponse.Clusters.EMPTY, null)); } catch (Exception e) { listener.onFailure(new ReduceSearchPhaseException("fetch", "inner finish failed", e, buildShardFailures())); } } - protected void onShardFailure(String phaseName, final CountDown counter, final SearchContextId searchId, Exception failure, + protected void onShardFailure(String phaseName, final CountDown counter, final ShardSearchContextId searchId, Exception failure, @Nullable SearchShardTarget searchShardTarget, Supplier nextPhaseSupplier) { if (logger.isDebugEnabled()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java index ec27af0970545..50a1351c3642f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardIterator.java @@ -24,8 +24,10 @@ import 
org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.internal.ShardSearchContextId; import java.util.List; import java.util.Objects; @@ -42,6 +44,9 @@ public final class SearchShardIterator extends PlainShardIterator { private final String clusterAlias; private boolean skip = false; + private final ShardSearchContextId searchContextId; + private final TimeValue searchContextKeepAlive; + /** * Creates a {@link PlainShardIterator} instance that iterates over a subset of the given shards * this the a given shardId. @@ -52,9 +57,18 @@ public final class SearchShardIterator extends PlainShardIterator { * @param originalIndices the indices that the search request originally related to (before any rewriting happened) */ public SearchShardIterator(@Nullable String clusterAlias, ShardId shardId, List shards, OriginalIndices originalIndices) { + this(clusterAlias, shardId, shards, originalIndices, null, null); + } + + public SearchShardIterator(@Nullable String clusterAlias, ShardId shardId, + List shards, OriginalIndices originalIndices, + ShardSearchContextId searchContextId, TimeValue searchContextKeepAlive) { super(shardId, shards); this.originalIndices = originalIndices; this.clusterAlias = clusterAlias; + this.searchContextId = searchContextId; + this.searchContextKeepAlive = searchContextKeepAlive; + assert (searchContextId == null) == (searchContextKeepAlive == null); } /** @@ -80,6 +94,17 @@ SearchShardTarget newSearchShardTarget(String nodeId) { return new SearchShardTarget(nodeId, shardId(), clusterAlias, originalIndices); } + /** + * Returns a non-null value if this request should use a specific search context instead of the latest one. 
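A note on the invariant asserted in the new `SearchShardIterator` constructor: the reader context id and its keep-alive must be supplied together or not at all. A hypothetical sketch of the two modes (`shardId`, `shards`, and `originalIndices` assumed in scope):

```
// Point-in-time iterator: carries the shard's reader context id plus its keep-alive.
SearchShardIterator pit = new SearchShardIterator(null, shardId, shards, originalIndices,
    new ShardSearchContextId("uuid1", 42L), TimeValue.timeValueMinutes(1));

// Regular iterator: neither is set, so the most recent reader is searched.
SearchShardIterator plain = new SearchShardIterator(null, shardId, shards, originalIndices);
```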
+ */ + ShardSearchContextId getSearchContextId() { + return searchContextId; + } + + TimeValue getSearchContextKeepAlive() { + return searchContextKeepAlive; + } + /** * Reset the iterator and mark it as skippable * @see #skip() diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 5cb39d68c3907..dc2dee1203bd9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -40,7 +41,7 @@ import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; @@ -89,7 +90,7 @@ public SearchTransportService(TransportService transportService, this.responseWrapper = responseWrapper; } - public void sendFreeContext(Transport.Connection connection, final SearchContextId contextId, OriginalIndices originalIndices) { + public void sendFreeContext(Transport.Connection connection, final ShardSearchContextId contextId, OriginalIndices originalIndices) { transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(originalIndices, contextId), TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(new ActionListener() { @Override @@ -104,7 +105,7 @@ public void onFailure(Exception e) { }, SearchFreeContextResponse::new)); } - public void sendFreeContext(Transport.Connection connection, SearchContextId contextId, + public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener) { transportService.sendRequest(connection, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(contextId), TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new)); @@ -197,15 +198,15 @@ public Map getPendingSearchRequests() { } static class ScrollFreeContextRequest extends TransportRequest { - private SearchContextId contextId; + private ShardSearchContextId contextId; - ScrollFreeContextRequest(SearchContextId contextId) { + ScrollFreeContextRequest(ShardSearchContextId contextId) { this.contextId = Objects.requireNonNull(contextId); } ScrollFreeContextRequest(StreamInput in) throws IOException { super(in); - contextId = new SearchContextId(in); + contextId = new ShardSearchContextId(in); } @Override @@ -214,7 +215,7 @@ public void writeTo(StreamOutput out) throws IOException { contextId.writeTo(out); } - public SearchContextId id() { + public ShardSearchContextId id() { return this.contextId; } @@ -223,7 +224,7 @@ public SearchContextId id() { static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest { private OriginalIndices originalIndices; - SearchFreeContextRequest(OriginalIndices originalIndices, SearchContextId id) { + 
SearchFreeContextRequest(OriginalIndices originalIndices, ShardSearchContextId id) { super(id); this.originalIndices = originalIndices; } @@ -279,16 +280,20 @@ public void writeTo(StreamOutput out) throws IOException { } } + static boolean keepStatesInContext(Version version) { + return version.before(Version.V_8_0_0); + } + public static void registerRequestHandler(TransportService transportService, SearchService searchService) { transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, ScrollFreeContextRequest::new, (request, channel, task) -> { - boolean freed = searchService.freeContext(request.id()); + boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); }); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new); transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, SearchFreeContextRequest::new, (request, channel, task) -> { - boolean freed = searchService.freeContext(request.id()); + boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); }); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); @@ -303,7 +308,7 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler(DFS_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchRequest::new, (request, channel, task) -> - searchService.executeDfsPhase(request, (SearchShardTask) task, + searchService.executeDfsPhase(request, keepStatesInContext(channel.getVersion()), (SearchShardTask) task, new ChannelActionListener<>(channel, DFS_ACTION_NAME, request)) ); @@ -311,7 +316,7 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler(QUERY_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchRequest::new, (request, channel, task) -> { - searchService.executeQueryPhase(request, (SearchShardTask) task, + searchService.executeQueryPhase(request, keepStatesInContext(channel.getVersion()), (SearchShardTask) task, new ChannelActionListener<>(channel, QUERY_ACTION_NAME, request)); }); TransportActionProxy.registerProxyActionWithDynamicResponseType(transportService, QUERY_ACTION_NAME, @@ -368,7 +373,7 @@ public static void registerRequestHandler(TransportService transportService, Sea * @param node the node to resolve * @return a connection to the given node belonging to the cluster with the provided alias. 
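The `keepStatesInContext(Version)` helper above is the BWC switch for this change: channels from pre-8.0.0 nodes keep per-request search states inside the reader context on the data node, while newer channels carry them back to the coordinating node. An illustrative sketch (the method is package-private, so same-package access is assumed):

```
// Pre-8.0.0 remote nodes still expect states to live in the reader context.
assert SearchTransportService.keepStatesInContext(Version.V_7_7_0);

// From 8.0.0 on, query/DFS states travel back to the coordinating node instead.
assert SearchTransportService.keepStatesInContext(Version.V_8_0_0) == false;
```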
*/ - Transport.Connection getConnection(@Nullable String clusterAlias, DiscoveryNode node) { + public Transport.Connection getConnection(@Nullable String clusterAlias, DiscoveryNode node) { if (clusterAlias == null) { return transportService.getConnection(node); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java index f61d268e551b4..a38fe71a2ea94 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportClearScrollAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -31,18 +32,21 @@ public class TransportClearScrollAction extends HandledTransportAction listener) { - Runnable runnable = new ClearScrollController(request, listener, clusterService.state().nodes(), logger, searchTransportService); + Runnable runnable = new ClearScrollController( + request, listener, clusterService.state().nodes(), logger, searchTransportService); runnable.run(); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index ccc816b031db8..33de77862857f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -34,23 +34,30 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -72,6 +79,7 @@ import java.util.Arrays; import java.util.Collections; 
import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -84,6 +92,8 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction.TASKS_ORIGIN; import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; @@ -104,12 +114,19 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; this.threadPool = threadPool; @@ -120,6 +137,7 @@ public TransportSearchAction(NodeClient client, ThreadPool threadPool, Transport this.clusterService = clusterService; this.searchService = searchService; this.indexNameExpressionResolver = indexNameExpressionResolver; + this.namedWriteableRegistry = namedWriteableRegistry; } private Map buildPerIndexAliasFilter(SearchRequest request, ClusterState clusterState, @@ -203,6 +221,59 @@ long buildTookInMillis() { @Override protected void doExecute(Task task, SearchRequest searchRequest, ActionListener listener) { + executeRequest(task, searchRequest, this::searchAsyncAction, listener); + } + + public interface SinglePhaseSearchAction { + void executeOnShardTarget(SearchTask searchTask, SearchShardTarget target, Transport.Connection connection, + ActionListener listener); + } + + public void executeRequest(Task task, SearchRequest searchRequest, String actionName, + boolean includeSearchContext, SinglePhaseSearchAction phaseSearchAction, + ActionListener listener) { + executeRequest(task, searchRequest, new SearchAsyncActionProvider() { + @Override + public AbstractSearchAsyncAction asyncSearchAction( + SearchTask task, SearchRequest searchRequest, Executor executor, GroupShardsIterator shardsIts, + SearchTimeProvider timeProvider, BiFunction connectionLookup, + ClusterState clusterState, Map aliasFilter, + Map concreteIndexBoosts, Map> indexRoutings, + ActionListener listener, boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters) { + return new AbstractSearchAsyncAction<>( + actionName, logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, + indexRoutings, executor, searchRequest, listener, shardsIts, timeProvider, clusterState, task, + new ArraySearchPhaseResults<>(shardsIts.size()), 1, clusters) { + @Override + protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard, + SearchActionListener listener) { + final Transport.Connection connection = getConnection(shardIt.getClusterAlias(), shard.currentNodeId()); + final SearchShardTarget searchShardTarget = shardIt.newSearchShardTarget(shard.currentNodeId()); + phaseSearchAction.executeOnShardTarget(task, searchShardTarget, connection, listener); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase(getName()) { + @Override + public void run() { + final AtomicArray atomicArray = results.getAtomicArray(); + sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + } + }; + } + + @Override + boolean includeSearchContextInResponse() { + return includeSearchContext; + } + }; + } + }, listener); + } + + private void executeRequest(Task task, SearchRequest searchRequest, + SearchAsyncActionProvider searchAsyncActionProvider, ActionListener listener) { final long relativeStartNanos = System.nanoTime(); final 
SearchTimeProvider timeProvider = new SearchTimeProvider(searchRequest.getOrCreateAbsoluteStartMillis(), relativeStartNanos, System::nanoTime); @@ -212,18 +283,27 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< // situations when source is rewritten to null due to a bug searchRequest.source(source); } - final ClusterState clusterState = clusterService.state(); - final Map remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(), - searchRequest.indices()); + final SearchContextId searchContext; + final Map remoteClusterIndices; + if (searchRequest.pointInTimeBuilder() != null) { + searchContext = SearchContextId.decode(namedWriteableRegistry, searchRequest.pointInTimeBuilder().getId()); + remoteClusterIndices = getIndicesFromSearchContexts(searchContext, searchRequest.indicesOptions()); + } else { + searchContext = null; + remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(), searchRequest.indices()); + } OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + final ClusterState clusterState = clusterService.state(); if (remoteClusterIndices.isEmpty()) { - executeLocalSearch(task, timeProvider, searchRequest, localIndices, clusterState, listener); + executeLocalSearch( + task, timeProvider, searchRequest, localIndices, clusterState, listener, searchContext, searchAsyncActionProvider); } else { if (shouldMinimizeRoundtrips(searchRequest)) { ccsRemoteReduce(searchRequest, localIndices, remoteClusterIndices, timeProvider, - searchService.aggReduceContextBuilder(searchRequest), - remoteClusterService, threadPool, listener, - (r, l) -> executeLocalSearch(task, timeProvider, r, localIndices, clusterState, l)); + searchService.aggReduceContextBuilder(searchRequest), + remoteClusterService, threadPool, listener, + (r, l) -> executeLocalSearch( + task, timeProvider, r, localIndices, clusterState, l, searchContext, searchAsyncActionProvider)); } else { AtomicInteger skippedClusters = new AtomicInteger(0); collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(), @@ -237,9 +317,10 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< int localClusters = localIndices == null ? 
0 : 1; int totalClusters = remoteClusterIndices.size() + localClusters; int successfulClusters = searchShardsResponses.size() + localClusters; - executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, - remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, - new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get())); + executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteShardIterators, + clusterNodeLookup, clusterState, remoteAliasFilters, listener, + new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), + searchContext, searchAsyncActionProvider); }, listener::onFailure)); } @@ -260,6 +341,9 @@ static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { if (searchRequest.scroll() != null) { return false; } + if (searchRequest.pointInTimeBuilder() != null) { + return false; + } if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) { return false; } @@ -294,7 +378,8 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(), searchResponse.getNumReducePhases()); listener.onResponse(new SearchResponse(internalSearchResponse, searchResponse.getScrollId(), searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), - timeProvider.buildTookInMillis(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0))); + timeProvider.buildTookInMillis(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), + searchResponse.pointInTimeId())); } @Override @@ -406,9 +491,12 @@ SearchResponse createFinalResponse() { } private void executeLocalSearch(Task task, SearchTimeProvider timeProvider, SearchRequest searchRequest, OriginalIndices localIndices, - ClusterState clusterState, ActionListener listener) { + ClusterState clusterState, ActionListener listener, + SearchContextId searchContext, + SearchAsyncActionProvider searchAsyncActionProvider) { executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), - (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, SearchResponse.Clusters.EMPTY); + (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, SearchResponse.Clusters.EMPTY, + searchContext, searchAsyncActionProvider); } static BiFunction processRemoteShards(Map searchShardsResponses, @@ -470,26 +558,52 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea OriginalIndices localIndices, List remoteShardIterators, BiFunction remoteConnections, ClusterState clusterState, Map remoteAliasMap, ActionListener listener, - SearchResponse.Clusters clusters) { + SearchResponse.Clusters clusters, @Nullable SearchContextId searchContext, + SearchAsyncActionProvider searchAsyncActionProvider) { clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ); + // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name // date math expressions and $now in scripts. 
This way all apis will deal with now in the same way instead // of just for the _search api - final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider); - Map aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); - Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), - searchRequest.indices()); - routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap); - String[] concreteIndices = new String[indices.length]; - for (int i = 0; i < indices.length; i++) { - concreteIndices[i] = indices[i].getName(); - } - Map nodeSearchCounts = searchTransportService.getPendingSearchRequests(); - GroupShardsIterator localShardsIterator = clusterService.operationRouting().searchShards(clusterState, + final List localShardIterators; + final Map aliasFilter; + final Map> indexRoutings; + final Executor asyncSearchExecutor; + + boolean preFilterSearchShards; + if (searchContext != null) { + assert searchRequest.pointInTimeBuilder() != null; + aliasFilter = searchContext.aliasFilter(); + indexRoutings = Map.of(); + asyncSearchExecutor = asyncSearchExecutor(localIndices.indices(), clusterState); + localShardIterators = getSearchShardsFromSearchContexts(clusterState, localIndices, searchRequest.getLocalClusterAlias(), + searchContext, searchRequest.pointInTimeBuilder().getKeepAlive()); + preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest, localIndices.indices(), + localShardIterators.size() + remoteShardIterators.size()); + } else { + final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider); + Map> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), + searchRequest.indices()); + routingMap = routingMap == null ? 
Collections.emptyMap() : Collections.unmodifiableMap(routingMap); + final String[] concreteIndices = new String[indices.length]; + for (int i = 0; i < indices.length; i++) { + concreteIndices[i] = indices[i].getName(); + } + asyncSearchExecutor = asyncSearchExecutor(concreteIndices, clusterState); + Map nodeSearchCounts = searchTransportService.getPendingSearchRequests(); + GroupShardsIterator localShardRoutings = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, searchRequest.preference(), searchService.getResponseCollectorService(), nodeSearchCounts); - GroupShardsIterator shardIterators = mergeShardsIterators(localShardsIterator, localIndices, - searchRequest.getLocalClusterAlias(), remoteShardIterators); + localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false) + .map(it -> new SearchShardIterator( + searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices, null, null)) + .collect(Collectors.toList()); + aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap); + indexRoutings = routingMap; + preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest, concreteIndices, + localShardIterators.size() + remoteShardIterators.size()); + } + final GroupShardsIterator shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators); failIfOverShardCountLimit(clusterService, shardIterators.size()); @@ -501,7 +615,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea searchRequest.searchType(QUERY_THEN_FETCH); } if (searchRequest.allowPartialSearchResults() == null) { - // No user preference defined in search request - apply cluster service default + // No user preference defined in search request - apply cluster service default searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults()); } if (searchRequest.isSuggestOnly()) { @@ -514,19 +628,21 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea break; } } - final DiscoveryNodes nodes = clusterState.nodes(); BiFunction connectionLookup = buildConnectionLookup(searchRequest.getLocalClusterAlias(), nodes::get, remoteConnections, searchTransportService::getConnection); - boolean preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest, indices, shardIterators.size()); - final Executor asyncSearchExecutor = asyncSearchExecutor(indices, clusterState); - searchAsyncAction(task, searchRequest, asyncSearchExecutor, shardIterators, timeProvider, connectionLookup, clusterState, - Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, routingMap, listener, preFilterSearchShards, clusters).start(); + searchAsyncActionProvider.asyncSearchAction( + task, searchRequest, asyncSearchExecutor, shardIterators, timeProvider, connectionLookup, clusterState, + Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, indexRoutings, listener, + preFilterSearchShards, threadPool, clusters).start(); } - Executor asyncSearchExecutor(final Index[] indices, final ClusterState clusterState) { - final boolean onlySystemIndices = - Arrays.stream(indices).allMatch(index -> clusterState.metadata().index(index.getName()).isSystem()); + Executor asyncSearchExecutor(final String[] indices, final ClusterState clusterState) { + final boolean onlySystemIndices = Arrays.stream(indices) + .allMatch(index -> { + final IndexMetadata indexMetadata = clusterState.metadata().index(index); + 
return indexMetadata != null && indexMetadata.isSystem(); + }); return onlySystemIndices ? threadPool.executor(ThreadPool.Names.SYSTEM_READ) : threadPool.executor(ThreadPool.Names.SEARCH); } @@ -554,7 +670,7 @@ static BiFunction buildConnectionLookup(St static boolean shouldPreFilterSearchShards(ClusterState clusterState, SearchRequest searchRequest, - Index[] indices, + String[] indices, int numShards) { SearchSourceBuilder source = searchRequest.source(); Integer preFilterShardSize = searchRequest.getPreFilterShardSize(); @@ -569,9 +685,9 @@ static boolean shouldPreFilterSearchShards(ClusterState clusterState, && preFilterShardSize < numShards; } - private static boolean hasReadOnlyIndices(Index[] indices, ClusterState clusterState) { - for (Index index : indices) { - ClusterBlockException writeBlock = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, index.getName()); + private static boolean hasReadOnlyIndices(String[] indices, ClusterState clusterState) { + for (String index : indices) { + ClusterBlockException writeBlock = clusterState.blocks().indexBlockedException(ClusterBlockLevel.WRITE, index); if (writeBlock != null) { return true; } @@ -579,29 +695,37 @@ private static boolean hasReadOnlyIndices(Index[] indices, ClusterState clusterS return false; } - static GroupShardsIterator mergeShardsIterators(GroupShardsIterator localShardsIterator, - OriginalIndices localIndices, - @Nullable String localClusterAlias, - List remoteShardIterators) { + static GroupShardsIterator mergeShardsIterators(List localShardIterators, + List remoteShardIterators) { List shards = new ArrayList<>(remoteShardIterators); - for (ShardIterator shardIterator : localShardsIterator) { - shards.add(new SearchShardIterator(localClusterAlias, shardIterator.shardId(), shardIterator.getShardRoutings(), localIndices)); - } + shards.addAll(localShardIterators); return GroupShardsIterator.sortAndCreate(shards); } - private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, - Executor executor, - GroupShardsIterator shardIterators, - SearchTimeProvider timeProvider, - BiFunction connectionLookup, - ClusterState clusterState, - Map aliasFilter, - Map concreteIndexBoosts, - Map> indexRoutings, - ActionListener listener, - boolean preFilter, - SearchResponse.Clusters clusters) { + interface SearchAsyncActionProvider { + AbstractSearchAsyncAction asyncSearchAction( + SearchTask task, SearchRequest searchRequest, Executor executor, GroupShardsIterator shardIterators, + SearchTimeProvider timeProvider, BiFunction connectionLookup, + ClusterState clusterState, Map aliasFilter, Map concreteIndexBoosts, + Map> indexRoutings, ActionListener listener, boolean preFilter, + ThreadPool threadPool, SearchResponse.Clusters clusters); + } + + private AbstractSearchAsyncAction searchAsyncAction( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardIterators, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + SearchResponse.Clusters clusters) { if (preFilter) { return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup, aliasFilter, concreteIndexBoosts, indexRoutings, executor, searchRequest, listener, shardIterators, @@ -619,6 +743,7 @@ private AbstractSearchAsyncAction searchAsyncAction indexRoutings, listener, false, + 
threadPool, clusters); return new SearchPhase(action.getName()) { @Override @@ -736,4 +861,38 @@ private void maybeFinish() { private static RemoteTransportException wrapRemoteClusterFailure(String clusterAlias, Exception e) { return new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); } + + static Map getIndicesFromSearchContexts(SearchContextId searchContext, + IndicesOptions indicesOptions) { + final Map> indices = new HashMap<>(); + for (Map.Entry entry : searchContext.shards().entrySet()) { + String clusterAlias = entry.getValue().getClusterAlias() == null ? + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY : entry.getValue().getClusterAlias(); + indices.computeIfAbsent(clusterAlias, k -> new HashSet<>()).add(entry.getKey().getIndexName()); + } + return indices.entrySet().stream() + .collect(Collectors.toMap(Map.Entry::getKey, e -> new OriginalIndices(e.getValue().toArray(String[]::new), indicesOptions))); + } + + static List getSearchShardsFromSearchContexts(ClusterState clusterState, OriginalIndices originalIndices, + String localClusterAlias, + SearchContextId searchContext, + TimeValue keepAlive) { + final List iterators = new ArrayList<>(searchContext.shards().size()); + for (Map.Entry entry : searchContext.shards().entrySet()) { + final ShardId shardId = entry.getKey(); + final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + final List matchingNodeFirstRoutings = new ArrayList<>(); + for (ShardRouting shard : shards) { + if (shard.currentNodeId().equals(entry.getValue().getNode())) { + matchingNodeFirstRoutings.add(0, shard); + } else { + matchingNodeFirstRoutings.add(shard); + } + } + iterators.add(new SearchShardIterator(localClusterAlias, shardId, matchingNodeFirstRoutings, originalIndices, + entry.getValue().getSearchContextId(), keepAlive)); + } + return iterators; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index f3755180b1e62..9a01b74579aa2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -21,26 +21,28 @@ import org.apache.lucene.store.ByteArrayDataInput; import org.apache.lucene.store.RAMOutputStream; +import org.elasticsearch.Version; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterAware; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Base64; final class TransportSearchHelper { private static final String INCLUDE_CONTEXT_UUID = "include_context_uuid"; - static InternalScrollSearchRequest internalScrollSearchRequest(SearchContextId id, SearchScrollRequest request) { + static InternalScrollSearchRequest internalScrollSearchRequest(ShardSearchContextId id, SearchScrollRequest request) { return new InternalScrollSearchRequest(request, id); } - static String buildScrollId(AtomicArray searchPhaseResults, - boolean includeContextUUID) throws IOException { + static String buildScrollId(AtomicArray searchPhaseResults, Version version) { + boolean 
includeContextUUID = version.onOrAfter(Version.V_7_7_0); try (RAMOutputStream out = new RAMOutputStream()) { if (includeContextUUID) { out.writeString(INCLUDE_CONTEXT_UUID); @@ -63,6 +65,8 @@ static String buildScrollId(AtomicArray searchPhase byte[] bytes = new byte[(int) out.getFilePointer()]; out.writeTo(bytes, 0); return Base64.getUrlEncoder().encodeToString(bytes); + } catch (IOException e) { + throw new UncheckedIOException(e); } } @@ -80,7 +84,7 @@ static ParsedScrollId parseScrollId(String scrollId) { includeContextUUID = false; type = firstChunk; } - ScrollIdForNode[] context = new ScrollIdForNode[in.readVInt()]; + SearchContextIdForNode[] context = new SearchContextIdForNode[in.readVInt()]; for (int i = 0; i < context.length; ++i) { final String contextUUID = includeContextUUID ? in.readString() : ""; long id = in.readLong(); @@ -93,7 +97,7 @@ static ParsedScrollId parseScrollId(String scrollId) { clusterAlias = target.substring(0, index); target = target.substring(index+1); } - context[i] = new ScrollIdForNode(clusterAlias, target, new SearchContextId(contextUUID, id)); + context[i] = new SearchContextIdForNode(clusterAlias, target, new ShardSearchContextId(contextUUID, id)); } if (in.getPosition() != bytes.length) { throw new IllegalArgumentException("Not all bytes were read"); diff --git a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java index cf4ab92baa0c6..9ea9e5cad73e0 100644 --- a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -27,6 +27,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.support.AbstractClient; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskListener; @@ -53,6 +54,7 @@ public class NodeClient extends AbstractClient { */ private Supplier localNodeId; private RemoteClusterService remoteClusterService; + private NamedWriteableRegistry namedWriteableRegistry; public NodeClient(Settings settings, ThreadPool threadPool) { super(settings, threadPool); @@ -60,11 +62,12 @@ public NodeClient(Settings settings, ThreadPool threadPool) { @SuppressWarnings("rawtypes") public void initialize(Map actions, TaskManager taskManager, Supplier localNodeId, - RemoteClusterService remoteClusterService) { + RemoteClusterService remoteClusterService, NamedWriteableRegistry namedWriteableRegistry) { this.actions = actions; this.taskManager = taskManager; this.localNodeId = localNodeId; this.remoteClusterService = remoteClusterService; + this.namedWriteableRegistry = namedWriteableRegistry; } @Override @@ -131,4 +134,9 @@ > TransportAction transportAction(ActionType action public Client getRemoteClusterClient(String clusterAlias) { return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias); } + + + public NamedWriteableRegistry getNamedWriteableRegistry() { + return namedWriteableRegistry; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 731eab51186a5..5a8dfbcc06470 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ 
-98,6 +98,11 @@ public GroupShardsIterator searchShards(ClusterState clusterState return GroupShardsIterator.sortAndCreate(new ArrayList<>(set)); } + public static ShardIterator getShards(ClusterState clusterState, ShardId shardId) { + final IndexShardRoutingTable shard = clusterState.routingTable().shardRoutingTable(shardId); + return shard.activeInitializingShardsRandomIt(); + } + private static final Map> EMPTY_ROUTING = Collections.emptyMap(); private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index ff5e00694c37c..b79b713ccef53 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -95,6 +95,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.function.BiFunction; +import java.util.function.Function; import java.util.stream.Stream; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; @@ -107,6 +108,7 @@ public abstract class Engine implements Closeable { public static final String FORCE_MERGE_UUID_KEY = "force_merge_uuid"; public static final String MIN_RETAINED_SEQNO = "min_retained_seq_no"; public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp"; + public static final String CAN_MATCH_SEARCH_SOURCE = "can_match"; // TODO: Make source of search enum? protected final ShardId shardId; protected final String allocationId; @@ -588,31 +590,17 @@ protected final GetResult getFromSearcher(Get get, BiFunction searcherFactory) throws EngineException; - /** - * Returns a new searcher instance. The consumer of this - * API is responsible for releasing the returned searcher in a - * safe manner, preferably in a try/finally block. - * - * @param source the source API or routing that triggers this searcher acquire - * - * @see Searcher#close() + * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand. */ - public final Searcher acquireSearcher(String source) throws EngineException { - return acquireSearcher(source, SearcherScope.EXTERNAL); + public final SearcherSupplier acquireSearcherSupplier(Function wrapper) throws EngineException { + return acquireSearcherSupplier(wrapper, SearcherScope.EXTERNAL); } /** - * Returns a new searcher instance. The consumer of this - * API is responsible for releasing the returned searcher in a - * safe manner, preferably in a try/finally block. - * - * @param source the source API or routing that triggers this searcher acquire - * @param scope the scope of this searcher ie. if the searcher will be used for get or search purposes - * - * @see Searcher#close() + * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand. */ - public Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException { + public SearcherSupplier acquireSearcherSupplier(Function wrapper, SearcherScope scope) throws EngineException { /* Acquire order here is store -> manager since we need * to make sure that the store is not closed before * the searcher is acquired. 
*/ @@ -621,35 +609,60 @@ public Searcher acquireSearcher(String source, SearcherScope scope) throws Engin } Releasable releasable = store::decRef; try { - assert assertSearcherIsWarmedUp(source, scope); ReferenceManager referenceManager = getReferenceManager(scope); - final ElasticsearchDirectoryReader acquire = referenceManager.acquire(); - AtomicBoolean released = new AtomicBoolean(false); - Searcher engineSearcher = new Searcher(source, acquire, - engineConfig.getSimilarity(), engineConfig.getQueryCache(), engineConfig.getQueryCachingPolicy(), - () -> { - if (released.compareAndSet(false, true)) { + ElasticsearchDirectoryReader acquire = referenceManager.acquire(); + SearcherSupplier reader = new SearcherSupplier(wrapper) { + @Override + public Searcher acquireSearcherInternal(String source) { + assert assertSearcherIsWarmedUp(source, scope); + return new Searcher(source, acquire, engineConfig.getSimilarity(), engineConfig.getQueryCache(), + engineConfig.getQueryCachingPolicy(), () -> {}); + } + + @Override + protected void doClose() { try { referenceManager.release(acquire); + } catch (IOException e) { + throw new UncheckedIOException("failed to close", e); + } catch (AlreadyClosedException e) { + // This means there's a bug somewhere: don't suppress it + throw new AssertionError(e); } finally { store.decRef(); } - } else { - /* In general, readers should never be released twice or this would break reference counting. There is one rare case - * when it might happen though: when the request and the Reaper thread would both try to release it in a very short - * amount of time, this is why we only log a warning instead of throwing an exception. */ - logger.warn("Searcher was released twice", new IllegalStateException("Double release")); } - }); + }; releasable = null; // success - hand over the reference to the engine reader - return engineSearcher; + return reader; } catch (AlreadyClosedException ex) { throw ex; } catch (Exception ex) { - maybeFailEngine("acquire_searcher", ex); + maybeFailEngine("acquire_reader", ex); ensureOpen(ex); // throw EngineCloseException here if we are already closed - logger.error(() -> new ParameterizedMessage("failed to acquire searcher, source {}", source), ex); - throw new EngineException(shardId, "failed to acquire searcher, source " + source, ex); + logger.error(() -> new ParameterizedMessage("failed to acquire reader"), ex); + throw new EngineException(shardId, "failed to acquire reader", ex); + } finally { + Releasables.close(releasable); + } + } + + public final Searcher acquireSearcher(String source) throws EngineException { + return acquireSearcher(source, SearcherScope.EXTERNAL); + } + + public Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException { + return acquireSearcher(source, scope, Function.identity()); + } + + public Searcher acquireSearcher(String source, SearcherScope scope, Function wrapper) throws EngineException { + SearcherSupplier releasable = null; + try { + SearcherSupplier reader = releasable = acquireSearcherSupplier(wrapper, scope); + Searcher searcher = reader.acquireSearcher(source); + releasable = null; + return new Searcher(source, searcher.getDirectoryReader(), searcher.getSimilarity(), + searcher.getQueryCache(), searcher.getQueryCachingPolicy(), () -> Releasables.close(searcher, reader)); } finally { Releasables.close(releasable); } @@ -1158,6 +1171,36 @@ default void onFailedEngine(String reason, @Nullable Exception e) { } } + public abstract static class SearcherSupplier implements 
Releasable { + private final Function wrapper; + private final AtomicBoolean released = new AtomicBoolean(false); + + public SearcherSupplier(Function wrapper) { + this.wrapper = wrapper; + } + + public final Searcher acquireSearcher(String source) { + if (released.get()) { + throw new AlreadyClosedException("SearcherSupplier was closed"); + } + final Searcher searcher = acquireSearcherInternal(source); + return CAN_MATCH_SEARCH_SOURCE.equals(source) ? searcher : wrapper.apply(searcher); + } + + @Override + public final void close() { + if (released.compareAndSet(false, true)) { + doClose(); + } else { + assert false : "SearchSupplier was released twice"; + } + } + + protected abstract void doClose(); + + protected abstract Searcher acquireSearcherInternal(String source); + } + public static final class Searcher extends IndexSearcher implements Releasable { private final String source; private final Closeable onClose; diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index 8dd230dbae831..c804b66b47a68 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -426,12 +426,7 @@ public TopDocsAndMaxScore topDocs(SearchHit hit) throws IOException { topDocsCollector = TopScoreDocCollector.create(topN, Integer.MAX_VALUE); maxScoreCollector = new MaxScoreCollector(); } - try { - intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); - } finally { - clearReleasables(Lifetime.COLLECTION); - } - + intersect(weight, innerHitQueryWeight, MultiCollector.wrap(topDocsCollector, maxScoreCollector), ctx); TopDocs td = topDocsCollector.topDocs(from(), size()); float maxScore = Float.NaN; if (maxScoreCollector != null) { diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java index 34b9c12f50a3f..cc4a58118a2f6 100644 --- a/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/elasticsearch/index/search/stats/ShardSearchStats.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import java.util.HashMap; @@ -147,25 +148,25 @@ private StatsHolder groupStats(String group) { } @Override - public void onNewContext(SearchContext context) { + public void onNewReaderContext(ReaderContext readerContext) { openContexts.inc(); } @Override - public void onFreeContext(SearchContext context) { + public void onFreeReaderContext(ReaderContext readerContext) { openContexts.dec(); } @Override - public void onNewScrollContext(SearchContext context) { + public void onNewScrollContext(ReaderContext readerContext) { totalStats.scrollCurrent.inc(); } @Override - public void onFreeScrollContext(SearchContext context) { + public void onFreeScrollContext(ReaderContext readerContext) { totalStats.scrollCurrent.dec(); assert totalStats.scrollCurrent.count() >= 0; - totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - context.getOriginNanoTime())); + totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - 
readerContext.getStartTimeInNano())); } static final class StatsHolder { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index cda56bae8c2a7..43752278f25e1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1207,12 +1207,20 @@ public void failShard(String reason, @Nullable Exception e) { } /** - * Acquire a lightweight searcher which can be used to rewrite shard search requests. + * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand. */ - public Engine.Searcher acquireCanMatchSearcher() { + public Engine.SearcherSupplier acquireSearcherSupplier() { + return acquireSearcherSupplier(Engine.SearcherScope.EXTERNAL); + } + + /** + * Acquires a point-in-time reader that can be used to create {@link Engine.Searcher}s on demand. + */ + public Engine.SearcherSupplier acquireSearcherSupplier(Engine.SearcherScope scope) { readAllowed(); markSearcherAccessed(); - return getEngine().acquireSearcher("can_match", Engine.SearcherScope.EXTERNAL); + final Engine engine = getEngine(); + return engine.acquireSearcherSupplier(this::wrapSearcher, scope); } public Engine.Searcher acquireSearcher(String source) { @@ -1227,8 +1235,7 @@ private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scop readAllowed(); markSearcherAccessed(); final Engine engine = getEngine(); - final Engine.Searcher searcher = engine.acquireSearcher(source, scope); - return wrapSearcher(searcher); + return engine.acquireSearcher(source, scope, this::wrapSearcher); } private Engine.Searcher wrapSearcher(Engine.Searcher searcher) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java index ede86e6ec222d..c0d98b434a300 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/SearchOperationListener.java @@ -21,6 +21,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.transport.TransportRequest; @@ -76,43 +77,43 @@ default void onFailedFetchPhase(SearchContext searchContext) {} default void onFetchPhase(SearchContext searchContext, long tookInNanos) {} /** - * Executed when a new search context was created - * @param context the created context + * Executed when a new reader context was created + * @param readerContext the created context */ - default void onNewContext(SearchContext context) {} + default void onNewReaderContext(ReaderContext readerContext) {} /** - * Executed when a previously created search context is freed. + * Executed when a previously created reader context is freed. * This happens either when the search execution finishes, if the * execution failed or if the search context as idle for and needs to be * cleaned up. 
- * @param context the freed search context + * @param readerContext the freed reader context */ - default void onFreeContext(SearchContext context) {} + default void onFreeReaderContext(ReaderContext readerContext) {} /** - * Executed when a new scroll search {@link SearchContext} was created - * @param context the created search context + * Executed when a new scroll search {@link ReaderContext} was created + * @param readerContext the created reader context */ - default void onNewScrollContext(SearchContext context) {} + default void onNewScrollContext(ReaderContext readerContext) {} /** * Executed when a scroll search {@link SearchContext} is freed. * This happens either when the scroll search execution finishes, if the * execution failed or if the search context as idle for and needs to be * cleaned up. - * @param context the freed search context + * @param readerContext the freed reader context */ - default void onFreeScrollContext(SearchContext context) {} + default void onFreeScrollContext(ReaderContext readerContext) {} /** - * Executed prior to using a {@link SearchContext} that has been retrieved + * Executed prior to using a {@link ReaderContext} that has been retrieved * from the active contexts. If the context is deemed invalid a runtime * exception can be thrown, which will prevent the context from being used. - * @param context the context retrieved from the active contexts + * @param readerContext the reader context used by this request * @param transportRequest the request that is going to use the search context */ - default void validateSearchContext(SearchContext context, TransportRequest transportRequest) {} + default void validateSearchContext(ReaderContext readerContext, TransportRequest transportRequest) {} /** * A Composite listener that multiplexes calls to each of the listeners methods.
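The CompositeListener hunks that follow encode two different failure policies: lifecycle notifications (onNewReaderContext, onFreeReaderContext, and the scroll variants) are delivered to every listener with failures logged and swallowed, while validateSearchContext collects failures and rethrows so an invalid context is rejected. A minimal standalone sketch of that pattern, using hypothetical Listener/CompositeListener names rather than the real Elasticsearch types:

```
import java.util.List;

// Hypothetical stand-ins for SearchOperationListener and its CompositeListener;
// only the multiplexing policy is modeled here, not the real Elasticsearch API.
interface Listener {
    default void onNewReaderContext(long readerId) {}
    default void validateReaderContext(long readerId) {}
}

final class CompositeListener implements Listener {
    private final List<Listener> listeners;

    CompositeListener(List<Listener> listeners) {
        this.listeners = listeners;
    }

    // Notification: log and continue, so one failing listener cannot hide
    // the event from the others.
    @Override
    public void onNewReaderContext(long readerId) {
        for (Listener listener : listeners) {
            try {
                listener.onNewReaderContext(readerId);
            } catch (Exception e) {
                System.err.println("onNewReaderContext listener failed: " + e);
            }
        }
    }

    // Validation: run every listener, keep the first failure (suppressing
    // the rest), and rethrow so the caller refuses to use the context.
    @Override
    public void validateReaderContext(long readerId) {
        RuntimeException failure = null;
        for (Listener listener : listeners) {
            try {
                listener.validateReaderContext(readerId);
            } catch (RuntimeException e) {
                if (failure == null) {
                    failure = e;
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }

    public static void main(String[] args) {
        Listener strict = new Listener() {
            @Override
            public void validateReaderContext(long readerId) {
                throw new IllegalStateException("reader [" + readerId + "] rejected");
            }
        };
        CompositeListener composite = new CompositeListener(List.of(new Listener() {}, strict));
        composite.onNewReaderContext(42);    // both listeners notified, nothing thrown
        composite.validateReaderContext(42); // throws IllegalStateException
    }
}
```

Swallowing exceptions on notifications keeps one misbehaving stats listener from unwinding context creation; validation, by contrast, exists precisely to veto a request, so there the first exception must propagate.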
@@ -193,10 +194,10 @@ public void onFetchPhase(SearchContext searchContext, long tookInNanos) { } @Override - public void onNewContext(SearchContext context) { + public void onNewReaderContext(ReaderContext readerContext) { for (SearchOperationListener listener : listeners) { try { - listener.onNewContext(context); + listener.onNewReaderContext(readerContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onNewContext listener [{}] failed", listener), e); } @@ -204,10 +205,10 @@ public void onNewContext(SearchContext context) { } @Override - public void onFreeContext(SearchContext context) { + public void onFreeReaderContext(ReaderContext readerContext) { for (SearchOperationListener listener : listeners) { try { - listener.onFreeContext(context); + listener.onFreeReaderContext(readerContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onFreeContext listener [{}] failed", listener), e); } @@ -215,10 +216,10 @@ public void onFreeContext(SearchContext context) { } @Override - public void onNewScrollContext(SearchContext context) { + public void onNewScrollContext(ReaderContext readerContext) { for (SearchOperationListener listener : listeners) { try { - listener.onNewScrollContext(context); + listener.onNewScrollContext(readerContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onNewScrollContext listener [{}] failed", listener), e); } @@ -226,10 +227,10 @@ public void onNewScrollContext(SearchContext context) { } @Override - public void onFreeScrollContext(SearchContext context) { + public void onFreeScrollContext(ReaderContext readerContext) { for (SearchOperationListener listener : listeners) { try { - listener.onFreeScrollContext(context); + listener.onFreeScrollContext(readerContext); } catch (Exception e) { logger.warn(() -> new ParameterizedMessage("onFreeScrollContext listener [{}] failed", listener), e); } @@ -237,11 +238,11 @@ public void onFreeScrollContext(SearchContext context) { } @Override - public void validateSearchContext(SearchContext context, TransportRequest request) { + public void validateSearchContext(ReaderContext readerContext, TransportRequest request) { Exception exception = null; for (SearchOperationListener listener : listeners) { try { - listener.validateSearchContext(context, request); + listener.validateSearchContext(readerContext, request); } catch (Exception e) { exception = ExceptionsHelper.useOrSuppress(exception, e); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 49875acc4b09e..569ba3ceb6c5b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -668,7 +668,10 @@ protected Node(final Environment initialEnvironment, resourcesToClose.add(injector.getInstance(PeerRecoverySourceService.class)); this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents); client.initialize(injector.getInstance(new Key>() {}), transportService.getTaskManager(), - () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + () -> clusterService.localNode().getId(), transportService.getRemoteClusterService(), + namedWriteableRegistry + + ); this.namedWriteableRegistry = namedWriteableRegistry; logger.debug("initializing HTTP handlers ..."); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 7e7cb832d7004..4abfbe14ea146 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -20,11 +20,13 @@ package org.elasticsearch.rest.action.search; import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.rest.BaseRestHandler; @@ -98,8 +100,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC * company. */ IntConsumer setSize = size -> searchRequest.source().size(size); - request.withContentOrSourceParamParserOrNull(parser -> - parseSearchRequest(searchRequest, request, parser, setSize)); + request.withContentOrSourceParamParserOrNull(parser -> { + parseSearchRequest(searchRequest, request, parser, setSize); + if (searchRequest.pointInTimeBuilder() != null) { + preparePointInTime(searchRequest, client.getNamedWriteableRegistry()); + } + }); return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); @@ -283,6 +289,17 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil } } + static void preparePointInTime(SearchRequest request, NamedWriteableRegistry namedWriteableRegistry) { + assert request.pointInTimeBuilder() != null; + final IndicesOptions indicesOptions = request.indicesOptions(); + final IndicesOptions stricterIndicesOptions = IndicesOptions.fromOptions( + indicesOptions.ignoreUnavailable(), indicesOptions.allowNoIndices(), false, false, false, + true, true, indicesOptions.ignoreThrottled()); + request.indicesOptions(stricterIndicesOptions); + final SearchContextId searchContextId = SearchContextId.decode(namedWriteableRegistry, request.pointInTimeBuilder().getId()); + request.indices(searchContextId.getActualIndices()); + } + /** * Modify the search request to accurately count the total hits that match the query * if {@link #TOTAL_HITS_AS_INT_PARAM} is set. 
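Before the DefaultSearchContext rework below, it is worth spelling out the invariant that preparePointInTime above establishes: a search that carries a point-in-time id no longer resolves indices from the request path but from the decoded context id, and wildcard expansion is switched off. A self-contained sketch of that rewrite rule, using record stand-ins (DecodedPit, SearchParams) rather than the real SearchRequest/IndicesOptions/SearchContextId classes:

```
import java.util.List;

// Standalone sketch of the rule preparePointInTime enforces above; the record
// types are illustrative stand-ins, not real Elasticsearch classes.
public final class PitRequestRewrite {
    record DecodedPit(List<String> actualIndices) {}
    record SearchParams(List<String> indices, boolean expandOpen, boolean expandClosed) {}

    static SearchParams rewrite(SearchParams original, DecodedPit pit) {
        // A PIT search must hit exactly the indices its reader contexts were
        // opened on, so the index list comes from the decoded PIT id, not from
        // the request path, and wildcard expansion is disabled.
        return new SearchParams(pit.actualIndices(), false, false);
    }

    public static void main(String[] args) {
        SearchParams fromUrl = new SearchParams(List.of("logs-*"), true, true);
        DecodedPit pit = new DecodedPit(List.of("logs-2020-08-24"));
        System.out.println(rewrite(fromUrl, pit));
        // -> SearchParams[indices=[logs-2020-08-24], expandOpen=false, expandClosed=false]
    }
}
```

Pinning the index list this way is what lets the coordinating node route the request back to exactly the shards whose reader contexts were opened, even if aliases or wildcard matches have changed since.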
diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 745a260027b49..6dede1afe5a14 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -30,7 +30,6 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -61,9 +60,10 @@ import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -76,7 +76,6 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -84,12 +83,12 @@ final class DefaultSearchContext extends SearchContext { - private final SearchContextId id; + private final ReaderContext readerContext; + private final Engine.Searcher engineSearcher; private final ShardSearchRequest request; private final SearchShardTarget shardTarget; private final LongSupplier relativeTimeSupplier; private SearchType searchType; - private final Engine.Searcher engineSearcher; private final BigArrays bigArrays; private final IndexShard indexShard; private final ClusterService clusterService; @@ -104,7 +103,6 @@ final class DefaultSearchContext extends SearchContext { // terminate after count private int terminateAfter = DEFAULT_TERMINATE_AFTER; private List groupStats; - private ScrollContext scrollContext; private boolean explain; private boolean version = false; // by default, we don't return versions private boolean seqAndPrimaryTerm = false; @@ -145,9 +143,6 @@ final class DefaultSearchContext extends SearchContext { private SearchHighlightContext highlight; private SuggestionSearchContext suggest; private List rescore; - private volatile long keepAlive; - private final long originNanoTime = System.nanoTime(); - private volatile long lastAccessTime = -1; private Profilers profilers; private final Map searchExtBuilders = new HashMap<>(); @@ -155,29 +150,34 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; - DefaultSearchContext(SearchContextId id, ShardSearchRequest request, SearchShardTarget shardTarget, - Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService, - IndexShard indexShard, BigArrays bigArrays, LongSupplier relativeTimeSupplier, TimeValue timeout, - FetchPhase fetchPhase, boolean lowLevelCancellation) throws IOException { - this.id = id; + DefaultSearchContext(ReaderContext readerContext, + 
ShardSearchRequest request, + SearchShardTarget shardTarget, + ClusterService clusterService, + BigArrays bigArrays, + LongSupplier relativeTimeSupplier, + TimeValue timeout, + FetchPhase fetchPhase, + boolean lowLevelCancellation) throws IOException { + this.readerContext = readerContext; this.request = request; this.fetchPhase = fetchPhase; this.searchType = request.searchType(); this.shardTarget = shardTarget; - this.engineSearcher = engineSearcher; // SearchContexts use a BigArrays that can circuit break this.bigArrays = bigArrays.withCircuitBreaking(); - this.dfsResult = new DfsSearchResult(id, shardTarget); - this.queryResult = new QuerySearchResult(id, shardTarget); - this.fetchResult = new FetchSearchResult(id, shardTarget); - this.indexShard = indexShard; - this.indexService = indexService; + this.dfsResult = new DfsSearchResult(readerContext.id(), shardTarget, request); + this.queryResult = new QuerySearchResult(readerContext.id(), shardTarget, request); + this.fetchResult = new FetchSearchResult(readerContext.id(), shardTarget); + this.indexService = readerContext.indexService(); + this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; + this.engineSearcher = readerContext.acquireSearcher("search"); this.searcher = new ContextIndexSearcher(engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation); this.relativeTimeSupplier = relativeTimeSupplier; this.timeout = timeout; - queryShardContext = indexService.newQueryShardContext(request.shardId().id(), searcher, + queryShardContext = indexService.newQueryShardContext(request.shardId().id(), this.searcher, request::nowInMillis, shardTarget.getClusterAlias()); queryBoost = request.indexBoost(); this.lowLevelCancellation = lowLevelCancellation; @@ -185,7 +185,7 @@ final class DefaultSearchContext extends SearchContext { @Override public void doClose() { - Releasables.close(engineSearcher); + engineSearcher.close(); } /** @@ -202,7 +202,7 @@ public void preProcess(boolean rewrite) { int maxResultWindow = indexService.getIndexSettings().getMaxResultWindow(); if (resultWindow > maxResultWindow) { - if (scrollContext == null) { + if (scrollContext() == null) { throw new IllegalArgumentException( "Result window is too large, from + size must be less than or equal to: [" + maxResultWindow + "] but was [" + resultWindow + "]. See the scroll api for a more efficient way to request large data sets. " @@ -219,7 +219,7 @@ public void preProcess(boolean rewrite) { throw new IllegalArgumentException("Cannot use [sort] option in conjunction with [rescore]."); } int maxWindow = indexService.getIndexSettings().getMaxRescoreWindow(); - for (RescoreContext rescoreContext: rescore) { + for (RescoreContext rescoreContext: rescore()) { if (rescoreContext.getWindowSize() > maxWindow) { throw new IllegalArgumentException("Rescore window [" + rescoreContext.getWindowSize() + "] is too large. " + "It must be less than [" + maxWindow + "]. 
This prevents allocating massive heaps for storing the results " @@ -299,13 +299,13 @@ && new NestedHelper(mapperService()).mightMatchNestedDocs(query) } @Override - public SearchContextId id() { - return this.id; + public ShardSearchContextId id() { + return readerContext.id(); } @Override public String source() { - return engineSearcher.source(); + return "search"; } @Override @@ -333,20 +333,9 @@ public float queryBoost() { return queryBoost; } - @Override - public long getOriginNanoTime() { - return originNanoTime; - } - @Override public ScrollContext scrollContext() { - return this.scrollContext; - } - - @Override - public SearchContext scrollContext(ScrollContext scrollContext) { - this.scrollContext = scrollContext; - return this; + return readerContext.scrollContext(); } @Override @@ -395,7 +384,7 @@ public void suggest(SuggestionSearchContext suggest) { @Override public List<RescoreContext> rescore() { if (rescore == null) { - return Collections.emptyList(); + return List.of(); } return rescore; } @@ -746,26 +735,6 @@ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int return this; } - @Override - public void accessed(long accessTime) { - this.lastAccessTime = accessTime; - } - - @Override - public long lastAccessTime() { - return this.lastAccessTime; - } - - @Override - public long keepAlive() { - return this.keepAlive; - } - - @Override - public void keepAlive(long keepAlive) { - this.keepAlive = keepAlive; - } - @Override public DfsSearchResult dfsResult() { return dfsResult; @@ -834,4 +803,9 @@ public SearchShardTask getTask() { public boolean isCancelled() { return task.isCancelled(); } + + @Override + public ReaderContext readerContext() { + return readerContext; + } } diff --git a/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java b/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java new file mode 100644 index 0000000000000..412a8dcee9c38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; + +import java.io.IOException; +import java.util.Map; +import java.util.Set; + +/** + * Since {@link org.elasticsearch.search.internal.SearchContext} no longer holds the state of a search, the top K results + * (i.e., documents that will be rescored by query rescorers) need to be serialized/deserialized between search phases. + * A {@link RescoreDocIds} encapsulates the top K results for each rescorer by its ordinal index.
+ */ +public final class RescoreDocIds implements Writeable { + public static final RescoreDocIds EMPTY = new RescoreDocIds(Map.of()); + + private final Map<Integer, Set<Integer>> docIds; + + public RescoreDocIds(Map<Integer, Set<Integer>> docIds) { + this.docIds = docIds; + } + + public RescoreDocIds(StreamInput in) throws IOException { + docIds = in.readMap(StreamInput::readVInt, i -> i.readSet(StreamInput::readVInt)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap(docIds, StreamOutput::writeVInt, (o, v) -> o.writeCollection(v, StreamOutput::writeVInt)); + } + + public Set<Integer> getId(int index) { + return docIds.get(index); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/SearchContextMissingException.java b/server/src/main/java/org/elasticsearch/search/SearchContextMissingException.java index 03ac85a8d81ac..aab7c5a9a9580 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchContextMissingException.java +++ b/server/src/main/java/org/elasticsearch/search/SearchContextMissingException.java @@ -23,20 +23,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import java.io.IOException; public class SearchContextMissingException extends ElasticsearchException { - private final SearchContextId contextId; + private final ShardSearchContextId contextId; - public SearchContextMissingException(SearchContextId contextId) { + public SearchContextMissingException(ShardSearchContextId contextId) { super("No search context found for id [" + contextId.getId() + "]"); this.contextId = contextId; } - public SearchContextId contextId() { + public ShardSearchContextId contextId() { return this.contextId; } @@ -47,7 +47,7 @@ public RestStatus status() { public SearchContextMissingException(StreamInput in) throws IOException{ super(in); - contextId = new SearchContextId(in); + contextId = new ShardSearchContextId(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java index 879110314a741..b2ef32ef3e2ea 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java +++ b/server/src/main/java/org/elasticsearch/search/SearchPhaseResult.java @@ -23,7 +23,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.TransportResponse; @@ -41,7 +42,9 @@ public abstract class SearchPhaseResult extends TransportResponse { private SearchShardTarget searchShardTarget; private int shardIndex = -1; - protected SearchContextId contextId; + protected ShardSearchContextId contextId; + private ShardSearchRequest shardSearchRequest; + private RescoreDocIds rescoreDocIds = RescoreDocIds.EMPTY; protected SearchPhaseResult() { @@ -56,7 +59,7 @@ protected SearchPhaseResult(StreamInput in) throws IOException { * or null if no context was created.
*/ @Nullable - public SearchContextId getContextId() { + public ShardSearchContextId getContextId() { return contextId; } @@ -94,6 +97,23 @@ public QuerySearchResult queryResult() { */ public FetchSearchResult fetchResult() { return null; } + @Nullable + public ShardSearchRequest getShardSearchRequest() { + return shardSearchRequest; + } + + public void setShardSearchRequest(ShardSearchRequest shardSearchRequest) { + this.shardSearchRequest = shardSearchRequest; + } + + public RescoreDocIds getRescoreDocIds() { + return rescoreDocIds; + } + + public void setRescoreDocIds(RescoreDocIds rescoreDocIds) { + this.rescoreDocIds = rescoreDocIds; + } + @Override public void writeTo(StreamOutput out) throws IOException { // TODO: this seems wrong, SearchPhaseResult should have a writeTo? diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 66767ef50b18b..53e06613c444e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1,3 +1,4 @@ + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -7,7 +8,7 @@ * not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an @@ -31,14 +32,16 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -50,6 +53,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; @@ -63,6 +67,7 @@ import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.SearchOperationListener; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; @@ -91,10 +96,10 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.internal.AliasFilter; import 
org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.LegacyReaderContext; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.profile.Profilers; @@ -113,7 +118,6 @@ import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportRequest; import java.io.IOException; import java.util.Collections; @@ -127,7 +131,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; -import java.util.function.Supplier; import static org.elasticsearch.common.unit.TimeValue.timeValueHours; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -200,7 +203,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final AtomicLong idGenerator = new AtomicLong(); - private final ConcurrentMapLong activeContexts = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); + private final ConcurrentMapLong activeReaders = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); private final MultiBucketConsumerService multiBucketConsumerService; @@ -245,7 +248,7 @@ public SearchService(ClusterService clusterService, IndicesService indicesServic private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { if (defaultKeepAlive.millis() > maxKeepAlive.millis()) { - throw new IllegalArgumentException("Default keep alive setting for scroll [" + DEFAULT_KEEPALIVE_SETTING.getKey() + "]" + + throw new IllegalArgumentException("Default keep alive setting for request [" + DEFAULT_KEEPALIVE_SETTING.getKey() + "]" + " should be smaller than max keep alive [" + MAX_KEEPALIVE_SETTING.getKey() + "], " + "was (" + defaultKeepAlive + " > " + maxKeepAlive + ")"); } @@ -287,16 +290,23 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem if (reason == IndexRemovalReason.DELETED || reason == IndexRemovalReason.CLOSED || reason == IndexRemovalReason.REOPENED) { freeAllContextForIndex(index); } - } - protected void putContext(SearchContext context) { - final SearchContext previous = activeContexts.put(context.id().getId(), context); + protected void putReaderContext(ReaderContext context) { + final ReaderContext previous = activeReaders.put(context.id().getId(), context); assert previous == null; + // ensure that if we race against afterIndexRemoved, we remove the context from the active list. + // this is important to ensure store can be cleaned up, in particular if the search is a scroll with a long timeout. 
+ final Index index = context.indexShard().shardId().getIndex(); + if (indicesService.hasIndex(index) == false) { + final ReaderContext removed = removeReaderContext(context.id().getId()); + assert removed == context; + throw new IndexNotFoundException(index); + } } - protected SearchContext removeContext(long id) { - return activeContexts.remove(id); + protected ReaderContext removeReaderContext(long id) { + return activeReaders.remove(id); } @Override @@ -305,8 +315,8 @@ protected void doStart() { @Override protected void doStop() { - for (final SearchContext context : activeContexts.values()) { - freeContext(context.id()); + for (final ReaderContext context : activeReaders.values()) { + freeReaderContext(context.id()); } } @@ -316,14 +326,14 @@ protected void doClose() { keepAliveReaper.cancel(); } - public void executeDfsPhase(ShardSearchRequest request, SearchShardTask task, ActionListener listener) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard shard = indexService.getShard(request.shardId().id()); - rewriteAndFetchShardRequest(shard, request, new ActionListener() { + public void executeDfsPhase(ShardSearchRequest request, boolean keepStatesInContext, + SearchShardTask task, ActionListener listener) { + final IndexShard shard = getShard(request); + rewriteAndFetchShardRequest(shard, request, new ActionListener() { @Override public void onResponse(ShardSearchRequest rewritten) { // fork the execution in the search thread pool - runAsync(shard, () -> executeDfsPhase(request, task), listener); + runAsync(getExecutor(shard), () -> executeDfsPhase(request, task, keepStatesInContext), listener); } @Override @@ -333,20 +343,18 @@ public void onFailure(Exception exc) { }); } - private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchShardTask task) throws IOException { - final SearchContext context = createAndPutContext(request, task); - context.incRef(); - try { - contextProcessing(context); + private DfsSearchResult executeDfsPhase(ShardSearchRequest request, + SearchShardTask task, + boolean keepStatesInContext) throws IOException { + ReaderContext readerContext = createOrGetReaderContext(request, keepStatesInContext); + try (Releasable ignored = readerContext.markAsUsed(); + SearchContext context = createContext(readerContext, request, task, true)) { dfsPhase.execute(context); - contextProcessedSuccessfully(context); return context.dfsResult(); } catch (Exception e) { logger.trace("Dfs phase failed", e); - processFailure(context, e); + processFailure(request, readerContext, e); throw e; - } finally { - cleanContext(context); } } @@ -363,37 +371,66 @@ private void loadOrExecuteQueryPhase(final ShardSearchRequest request, final Sea } } - public void executeQueryPhase(ShardSearchRequest request, SearchShardTask task, ActionListener listener) { + public void executeQueryPhase(ShardSearchRequest request, boolean keepStatesInContext, + SearchShardTask task, ActionListener listener) { assert request.canReturnNullResponseIfMatchNoDocs() == false || request.numberOfShards() > 1 : "empty responses require more than one shard"; - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard shard = indexService.getShard(request.shardId().id()); + final IndexShard shard = getShard(request); rewriteAndFetchShardRequest(shard, request, new ActionListener() { @Override public void onResponse(ShardSearchRequest orig) { + final ReaderContext readerContext = 
createOrGetReaderContext(orig, keepStatesInContext); + final Releasable markAsUsed = readerContext.markAsUsed(); if (orig.canReturnNullResponseIfMatchNoDocs()) { + assert orig.scroll() == null; // we clone the shard request and perform a quick rewrite using a lightweight // searcher since we are outside of the search thread pool. // If the request rewrites to "match none" we can shortcut the query phase // entirely. Otherwise we fork the execution in the search thread pool. ShardSearchRequest canMatchRequest = new ShardSearchRequest(orig); - try (Engine.Searcher searcher = shard.acquireCanMatchSearcher()) { - QueryShardContext context = indexService.newQueryShardContext(canMatchRequest.shardId().id(), searcher, - canMatchRequest::nowInMillis, canMatchRequest.getClusterAlias()); + try (Engine.Searcher searcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE)) { + QueryShardContext context = readerContext.indexService().newQueryShardContext(canMatchRequest.shardId().id(), + searcher, canMatchRequest::nowInMillis, canMatchRequest.getClusterAlias()); Rewriteable.rewrite(canMatchRequest.getRewriteable(), context, true); } catch (Exception exc) { - listener.onFailure(exc); + try (markAsUsed) { + listener.onFailure(exc); + } finally { + processFailure(request, readerContext, exc); + } return; } if (canRewriteToMatchNone(canMatchRequest.source()) - && canMatchRequest.source().query() instanceof MatchNoneQueryBuilder) { - assert canMatchRequest.scroll() == null : "must always create search context for scroll requests"; - listener.onResponse(QuerySearchResult.nullInstance()); + && canMatchRequest.source().query() instanceof MatchNoneQueryBuilder) { + try (markAsUsed) { + if (orig.readerId() == null) { + try { + listener.onResponse(QuerySearchResult.nullInstance()); + } finally { + // close and remove the ephemeral reader context + removeReaderContext(readerContext.id().getId()); + Releasables.close(readerContext); + } + } else { + listener.onResponse(QuerySearchResult.nullInstance()); + } + } return; } } + // fork the execution in the search thread pool - runAsync(shard, () -> executeQueryPhase(orig, task), listener); + runAsync(getExecutor(shard), () -> { + try (markAsUsed) { + return executeQueryPhase(orig, task, readerContext); + } + }, ActionListener.wrap(listener::onResponse, exc -> { + try (markAsUsed) { + listener.onFailure(exc); + } finally { + processFailure(request, readerContext, exc); + } + })); } @Override @@ -403,47 +440,40 @@ public void onFailure(Exception exc) { }); } - private void runAsync(IndexShard shard, CheckedSupplier command, ActionListener listener) { - Executor executor = getExecutor(shard); - try { - executor.execute(() -> { - T result; - try { - result = command.get(); - } catch (Exception exc) { - listener.onFailure(exc); - return; - } - listener.onResponse(result); - }); - } catch (Exception exc) { - listener.onFailure(exc); + private IndexShard getShard(ShardSearchRequest request) { + if (request.readerId() != null) { + return findReaderContext(request.readerId()).indexShard(); + } else { + return indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); } } - private void runAsync(SearchContextId contextId, Supplier executable, ActionListener listener) { - getExecutor(contextId).execute(ActionRunnable.supply(listener, executable::get)); + private void runAsync(Executor executor, CheckedSupplier executable, ActionListener listener) { + executor.execute(ActionRunnable.supply(listener, executable::get)); } - private 
SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchShardTask task) throws Exception { - final SearchContext context = createAndPutContext(request, task); - context.incRef(); - try { + private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, + SearchShardTask task, + ReaderContext readerContext) throws Exception { + try (SearchContext context = createContext(readerContext, request, task, true)) { final long afterQueryTime; try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { - contextProcessing(context); loadOrExecuteQueryPhase(request, context); - if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { - freeContext(context.id()); - } else { - contextProcessedSuccessfully(context); + if (context.queryResult().hasSearchContext() == false && readerContext.singleSession()) { + freeReaderContext(readerContext.id()); } afterQueryTime = executor.success(); } if (request.numberOfShards() == 1) { - return executeFetchPhase(context, afterQueryTime); + return executeFetchPhase(readerContext, context, afterQueryTime); + } else { + // Pass the rescoreDocIds to the queryResult to send them the coordinating node and receive them back in the fetch phase. + // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. + final RescoreDocIds rescoreDocIds = context.rescoreDocIds(); + context.queryResult().setRescoreDocIds(rescoreDocIds); + readerContext.setRescoreDocIds(rescoreDocIds); + return context.queryResult(); } - return context.queryResult(); } catch (Exception e) { // execution exception can happen while loading the cache, strip it if (e instanceof ExecutionException) { @@ -451,21 +481,17 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh (Exception) e.getCause() : new ElasticsearchException(e.getCause()); } logger.trace("Query phase failed", e); - processFailure(context, e); + processFailure(request, readerContext, e); throw e; - } finally { - cleanContext(context); } } - private QueryFetchSearchResult executeFetchPhase(SearchContext context, long afterQueryTime) { + private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchContext context, long afterQueryTime) { try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime)){ shortcutDocIdsToLoad(context); fetchPhase.execute(context); - if (fetchPhaseShouldFreeContext(context)) { - freeContext(context.id()); - } else { - contextProcessedSuccessfully(context); + if (reader.singleSession()) { + freeReaderContext(reader.id()); } executor.success(); } @@ -475,73 +501,63 @@ private QueryFetchSearchResult executeFetchPhase(SearchContext context, long aft public void executeQueryPhase(InternalScrollSearchRequest request, SearchShardTask task, ActionListener listener) { - runAsync(request.contextId(), () -> { - final SearchContext context = findContext(request.contextId(), request); - context.incRef(); - try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { - context.setTask(task); - contextProcessing(context); - processScroll(request, context); - queryPhase.execute(context); - contextProcessedSuccessfully(context); + final LegacyReaderContext readerContext = (LegacyReaderContext) findReaderContext(request.contextId()); + runAsync(getExecutor(readerContext.indexShard()), () -> { + final ShardSearchRequest shardSearchRequest = 
readerContext.getShardSearchRequest(null); + try (Releasable ignored = readerContext.markAsUsed(); + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, false); + SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext)) { + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); + if (request.scroll() != null && request.scroll().keepAlive() != null) { + final long keepAlive = request.scroll().keepAlive().millis(); + checkKeepAliveLimit(keepAlive); + readerContext.keepAlive(keepAlive); + } + searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); + processScroll(request, readerContext, searchContext); + queryPhase.execute(searchContext); executor.success(); - return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget()); + readerContext.setRescoreDocIds(searchContext.rescoreDocIds()); + return new ScrollQuerySearchResult(searchContext.queryResult(), searchContext.shardTarget()); } catch (Exception e) { logger.trace("Query phase failed", e); - processFailure(context, e); + processFailure(shardSearchRequest, readerContext, e); throw e; - } finally { - cleanContext(context); } }, listener); } public void executeQueryPhase(QuerySearchRequest request, SearchShardTask task, ActionListener listener) { - runAsync(request.contextId(), () -> { - final SearchContext context = findContext(request.contextId(), request); - context.setTask(task); - context.incRef(); - try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { - contextProcessing(context); - context.searcher().setAggregatedDfs(request.dfs()); - queryPhase.execute(context); - if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { + final ReaderContext readerContext = findReaderContext(request.contextId()); + runAsync(getExecutor(readerContext.indexShard()), () -> { + final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.shardSearchRequest()); + readerContext.setAggregatedDfs(request.dfs()); + try (Releasable ignored = readerContext.markAsUsed(); + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, true); + SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext)) { + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); + searchContext.searcher().setAggregatedDfs(request.dfs()); + queryPhase.execute(searchContext); + if (searchContext.queryResult().hasSearchContext() == false && readerContext.singleSession()) { // no hits, we can release the context since there will be no fetch phase - freeContext(context.id()); - } else { - contextProcessedSuccessfully(context); + freeReaderContext(readerContext.id()); } executor.success(); - return context.queryResult(); + // Pass the rescoreDocIds to the queryResult to send them the coordinating node and receive them back in the fetch phase. + // We also pass the rescoreDocIds to the LegacyReaderContext in case the search state needs to stay in the data node. 
+ final RescoreDocIds rescoreDocIds = searchContext.rescoreDocIds(); + searchContext.queryResult().setRescoreDocIds(rescoreDocIds); + readerContext.setRescoreDocIds(rescoreDocIds); + return searchContext.queryResult(); } catch (Exception e) { + assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Query phase failed", e); - processFailure(context, e); + processFailure(shardSearchRequest, readerContext, e); throw e; - } finally { - cleanContext(context); } }, listener); } - private boolean fetchPhaseShouldFreeContext(SearchContext context) { - if (context.scrollContext() == null) { - // simple search, no scroll - return true; - } else { - // scroll request, but the scroll was not extended - return context.scrollContext().scroll == null; - } - } - - - final Executor getExecutor(SearchContextId contextId) { - SearchContext context = getContext(contextId); - if (context == null) { - throw new SearchContextMissingException(contextId); - } - return getExecutor(context.indexShard()); - } - private Executor getExecutor(IndexShard indexShard) { assert indexShard != null; final String executorName; @@ -557,137 +573,191 @@ private Executor getExecutor(IndexShard indexShard) { public void executeFetchPhase(InternalScrollSearchRequest request, SearchShardTask task, ActionListener listener) { - runAsync(request.contextId(), () -> { - final SearchContext context = findContext(request.contextId(), request); - context.setTask(task); - context.incRef(); - try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)){ - contextProcessing(context); - processScroll(request, context); - queryPhase.execute(context); + final LegacyReaderContext readerContext = (LegacyReaderContext) findReaderContext(request.contextId()); + runAsync(getExecutor(readerContext.indexShard()), () -> { + final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(null); + try (Releasable ignored = readerContext.markAsUsed(); + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, false); + SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(searchContext)) { + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); + if (request.scroll() != null && request.scroll().keepAlive() != null) { + checkKeepAliveLimit(request.scroll().keepAlive().millis()); + readerContext.keepAlive(request.scroll().keepAlive().millis()); + } + searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(null)); + searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(null)); + processScroll(request, readerContext, searchContext); + queryPhase.execute(searchContext); final long afterQueryTime = executor.success(); - QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, afterQueryTime); - return new ScrollQueryFetchSearchResult(fetchSearchResult, context.shardTarget()); + QueryFetchSearchResult fetchSearchResult = executeFetchPhase(readerContext, searchContext, afterQueryTime); + return new ScrollQueryFetchSearchResult(fetchSearchResult, searchContext.shardTarget()); } catch (Exception e) { + assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Fetch phase failed", e); - processFailure(context, e); + processFailure(shardSearchRequest, readerContext, e); throw e; - } finally { - cleanContext(context); } }, listener); } public void executeFetchPhase(ShardFetchRequest 
request, SearchShardTask task, ActionListener listener) { - runAsync(request.contextId(), () -> { - final SearchContext context = findContext(request.contextId(), request); - context.incRef(); - try { - context.setTask(task); - contextProcessing(context); + final ReaderContext readerContext = findReaderContext(request.contextId()); + runAsync(getExecutor(readerContext.indexShard()), () -> { + final ShardSearchRequest shardSearchRequest = readerContext.getShardSearchRequest(request.getShardSearchRequest()); + try (Releasable ignored = readerContext.markAsUsed(); + SearchContext searchContext = createContext(readerContext, shardSearchRequest, task, false)) { + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); if (request.lastEmittedDoc() != null) { - context.scrollContext().lastEmittedDoc = request.lastEmittedDoc(); + searchContext.scrollContext().lastEmittedDoc = request.lastEmittedDoc(); } - context.docIdsToLoad(request.docIds(), 0, request.docIdsSize()); - try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, System.nanoTime())) { - fetchPhase.execute(context); - if (fetchPhaseShouldFreeContext(context)) { - freeContext(request.contextId()); - } else { - contextProcessedSuccessfully(context); + searchContext.assignRescoreDocIds(readerContext.getRescoreDocIds(request.getRescoreDocIds())); + searchContext.searcher().setAggregatedDfs(readerContext.getAggregatedDfs(request.getAggregatedDfs())); + searchContext.docIdsToLoad(request.docIds(), 0, request.docIdsSize()); + try (SearchOperationListenerExecutor executor = + new SearchOperationListenerExecutor(searchContext, true, System.nanoTime())) { + fetchPhase.execute(searchContext); + if (readerContext.singleSession()) { + freeReaderContext(request.contextId()); } executor.success(); } - return context.fetchResult(); + return searchContext.fetchResult(); } catch (Exception e) { + assert TransportActions.isShardNotAvailableException(e) == false : new AssertionError(e); logger.trace("Fetch phase failed", e); - processFailure(context, e); + processFailure(shardSearchRequest, readerContext, e); throw e; - } finally { - cleanContext(context); } }, listener); } - private SearchContext getContext(SearchContextId contextId) { - final SearchContext context = activeContexts.get(contextId.getId()); - if (context == null) { + private ReaderContext getReaderContext(ShardSearchContextId id) { + final ReaderContext reader = activeReaders.get(id.getId()); + if (reader == null) { return null; } - if (context.id().getReaderId().equals(contextId.getReaderId()) || contextId.getReaderId().isEmpty()) { - return context; + if (reader.id().getReaderId().equals(id.getReaderId()) || id.getReaderId().isEmpty()) { + return reader; } return null; } - private SearchContext findContext(SearchContextId contextId, TransportRequest request) throws SearchContextMissingException { - final SearchContext context = getContext(contextId); - if (context == null) { - throw new SearchContextMissingException(contextId); - } - - SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); - try { - operationListener.validateSearchContext(context, request); - return context; - } catch (Exception e) { - processFailure(context, e); - throw e; + private ReaderContext findReaderContext(ShardSearchContextId id) throws SearchContextMissingException { + final ReaderContext reader = getReaderContext(id); + if (reader == null) { + throw new 
SearchContextMissingException(id); } + return reader; } - final SearchContext createAndPutContext(ShardSearchRequest request, SearchShardTask task) throws IOException { - SearchContext context = createContext(request, task); - onNewContext(context); - boolean success = false; - try { - putContext(context); - // ensure that if we race against afterIndexRemoved, we free the context here. - // this is important to ensure store can be cleaned up, in particular if the search is a scroll with a long timeout. - indicesService.indexServiceSafe(request.shardId().getIndex()); - success = true; - return context; - } finally { - if (success == false) { - freeContext(context.id()); - } + final ReaderContext createOrGetReaderContext(ShardSearchRequest request, boolean keepStatesInContext) { + if (request.readerId() != null) { + assert keepStatesInContext == false; + final ReaderContext readerContext = findReaderContext(request.readerId()); + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); + final long keepAlive = request.keepAlive().millis(); + checkKeepAliveLimit(keepAlive); + readerContext.keepAlive(keepAlive); + return readerContext; } + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard shard = indexService.getShard(request.shardId().id()); + Engine.SearcherSupplier reader = shard.acquireSearcherSupplier(); + return createAndPutReaderContext(request, indexService, shard, reader, keepStatesInContext); } - private void onNewContext(SearchContext context) { - boolean success = false; + final ReaderContext createAndPutReaderContext(ShardSearchRequest request, IndexService indexService, IndexShard shard, + Engine.SearcherSupplier reader, boolean keepStatesInContext) { + assert request.readerId() == null; + assert request.keepAlive() == null; + ReaderContext readerContext = null; + Releasable decreaseScrollContexts = null; try { - if (context.scrollContext() != null) { - context.indexShard().getSearchOperationListener().onNewScrollContext(context); + if (request.scroll() != null) { + decreaseScrollContexts = openScrollContexts::decrementAndGet; + if (openScrollContexts.incrementAndGet() > maxOpenScrollContext) { + throw new ElasticsearchException( + "Trying to create too many scroll contexts. Must be less than or equal to: [" + + maxOpenScrollContext + "]. " + "This limit can be set by changing the [" + + MAX_OPEN_SCROLL_CONTEXT.getKey() + "] setting."); + } } - context.indexShard().getSearchOperationListener().onNewContext(context); - success = true; - } finally { - // currently, the concrete listener is CompositeListener, which swallows exceptions, but here we anyway try to do the - // right thing by closing and notifying onFreeXXX in case one of the listeners fails with an exception in the future. 
- if (success == false) { - try (context) { - onFreeContext(context); + final long keepAlive = getKeepAlive(request); + checkKeepAliveLimit(keepAlive); + if (keepStatesInContext || request.scroll() != null) { + readerContext = new LegacyReaderContext(idGenerator.incrementAndGet(), indexService, shard, reader, request, keepAlive); + if (request.scroll() != null) { + readerContext.addOnClose(decreaseScrollContexts); + decreaseScrollContexts = null; } + } else { + readerContext = new ReaderContext(idGenerator.incrementAndGet(), indexService, shard, reader, keepAlive, + request.keepAlive() == null); + } + reader = null; + final ReaderContext finalReaderContext = readerContext; + final SearchOperationListener searchOperationListener = shard.getSearchOperationListener(); + searchOperationListener.onNewReaderContext(finalReaderContext); + if (finalReaderContext.scrollContext() != null) { + searchOperationListener.onNewScrollContext(finalReaderContext); } + readerContext.addOnClose(() -> { + try { + if (finalReaderContext.scrollContext() != null) { + searchOperationListener.onFreeScrollContext(finalReaderContext); + } + } finally { + searchOperationListener.onFreeReaderContext(finalReaderContext); + } + }); + putReaderContext(finalReaderContext); + readerContext = null; + return finalReaderContext; + } finally { + Releasables.close(reader, readerContext, decreaseScrollContexts); } } - final SearchContext createContext(ShardSearchRequest request, SearchShardTask searchTask) throws IOException { - final DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout); + /** + * Opens the reader context for given shardId. The newly opened reader context will be keep + * until the {@code keepAlive} elapsed unless it is manually released. + */ + public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { + checkKeepAliveLimit(keepAlive.millis()); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard shard = indexService.getShard(shardId.id()); + final SearchOperationListener searchOperationListener = shard.getSearchOperationListener(); + shard.awaitShardSearchActive(ignored -> { + Engine.SearcherSupplier searcherSupplier = null; + ReaderContext readerContext = null; + try { + searcherSupplier = shard.acquireSearcherSupplier(); + readerContext = new ReaderContext( + idGenerator.incrementAndGet(), indexService, shard, searcherSupplier, keepAlive.millis(), false); + final ReaderContext finalReaderContext = readerContext; + searcherSupplier = null; // transfer ownership to reader context + searchOperationListener.onNewReaderContext(readerContext); + readerContext.addOnClose(() -> searchOperationListener.onFreeReaderContext(finalReaderContext)); + putReaderContext(readerContext); + readerContext = null; + listener.onResponse(finalReaderContext.id()); + } catch (Exception exc) { + Releasables.closeWhileHandlingException(searcherSupplier, readerContext); + listener.onFailure(exc); + } + }); + } + + final SearchContext createContext(ReaderContext readerContext, + ShardSearchRequest request, + SearchShardTask task, + boolean includeAggregations) throws IOException { + final DefaultSearchContext context = createSearchContext(readerContext, request, defaultSearchTimeout); try { if (request.scroll() != null) { - context.addReleasable(openScrollContexts::decrementAndGet, Lifetime.CONTEXT); - if (openScrollContexts.incrementAndGet() > maxOpenScrollContext) { - throw new ElasticsearchException( - "Trying to 
create too many scroll contexts. Must be less than or equal to: [" + - maxOpenScrollContext + "]. " + "This limit can be set by changing the [" - + MAX_OPEN_SCROLL_CONTEXT.getKey() + "] setting."); - } - context.scrollContext(new ScrollContext()); context.scrollContext().scroll = request.scroll(); } - parseSource(context, request.source()); + parseSource(context, request.source(), includeAggregations); // if the from and size are still not set, default them if (context.from() == -1) { @@ -696,19 +766,12 @@ final SearchContext createContext(ShardSearchRequest request, SearchShardTask se if (context.size() == -1) { context.size(DEFAULT_SIZE); } - context.setTask(searchTask); + context.setTask(task); // pre process dfsPhase.preProcess(context); queryPhase.preProcess(context); fetchPhase.preProcess(context); - - // compute the context keep alive - long keepAlive = defaultKeepAlive; - if (request.scroll() != null && request.scroll().keepAlive() != null) { - keepAlive = request.scroll().keepAlive().millis(); - } - contextScrollKeepAlive(context, keepAlive); } catch (Exception e) { context.close(); throw e; @@ -718,30 +781,25 @@ final SearchContext createContext(ShardSearchRequest request, SearchShardTask se } public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout) throws IOException { - return createSearchContext(request, timeout, "search"); + final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + final IndexShard indexShard = indexService.getShard(request.shardId().getId()); + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); + try (ReaderContext readerContext = new ReaderContext(idGenerator.incrementAndGet(), indexService, indexShard, reader, -1L, true)) { + DefaultSearchContext searchContext = createSearchContext(readerContext, request, timeout); + searchContext.addReleasable(readerContext.markAsUsed()); + return searchContext; + } } - private DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, String source) throws IOException { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), - indexShard.shardId(), request.getClusterAlias(), OriginalIndices.NONE); - Engine.Searcher searcher = indexShard.acquireSearcher(source); - + private DefaultSearchContext createSearchContext(ReaderContext reader, ShardSearchRequest request, TimeValue timeout) + throws IOException { boolean success = false; DefaultSearchContext searchContext = null; try { - // TODO: If no changes are made since the last commit, and the searcher is opened from that commit, then we can use the - // commit_id as the context_id. And if the local checkpoint and max_seq_no of that commit equal the global checkpoint, - // then we can use a combination of history_uuid and one of these values as a **weaker** context_id. - // Reader contexts with the same commit_id can be replaced at any time, as the Lucene doc ids are the same. - // Reader contexts with the same seq_id, however, can't be replaced between the query and fetch phase because - // the Lucene doc ids can be different. 
- final String readerId = UUIDs.base64UUID(); - searchContext = new DefaultSearchContext( - new SearchContextId(readerId, idGenerator.incrementAndGet()), - request, shardTarget, searcher, clusterService, indexService, indexShard, bigArrays, - threadPool::relativeTimeInMillis, timeout, fetchPhase, lowLevelCancellation); + SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), + reader.indexShard().shardId(), request.getClusterAlias(), OriginalIndices.NONE); + searchContext = new DefaultSearchContext(reader, request, shardTarget, clusterService, + bigArrays, threadPool::relativeTimeInMillis, timeout, fetchPhase, lowLevelCancellation); // we clone the query shard context here just for rewriting otherwise we // might end up with incorrect state since we are using now() or script services // during rewrite and normalized / evaluate templates etc. @@ -751,83 +809,63 @@ private DefaultSearchContext createSearchContext(ShardSearchRequest request, Tim success = true; } finally { if (success == false) { - // we handle the case where the DefaultSearchContext constructor throws an exception since we would otherwise - // leak a searcher and this can have severe implications (unable to obtain shard lock exceptions). - IOUtils.closeWhileHandlingException(searcher); + // we handle the case where `IndicesService#indexServiceSafe`or `IndexService#getShard`, or the DefaultSearchContext + // constructor throws an exception since we would otherwise leak a searcher and this can have severe implications + // (unable to obtain shard lock exceptions). + IOUtils.closeWhileHandlingException(searchContext); } } return searchContext; } - private void freeAllContextForIndex(Index index) { assert index != null; - for (SearchContext ctx : activeContexts.values()) { + for (ReaderContext ctx : activeReaders.values()) { if (index.equals(ctx.indexShard().shardId().getIndex())) { - freeContext(ctx.id()); + freeReaderContext(ctx.id()); } } } - public boolean freeContext(SearchContextId contextId) { - if (getContext(contextId) != null) { - try (SearchContext context = removeContext(contextId.getId())) { - if (context != null) { - onFreeContext(context); - return true; - } + public boolean freeReaderContext(ShardSearchContextId contextId) { + if (getReaderContext(contextId) != null) { + try (ReaderContext context = removeReaderContext(contextId.getId())) { + return context != null; } } return false; } - private void onFreeContext(SearchContext context) { - assert context.refCount() > 0 : " refCount must be > 0: " + context.refCount(); - assert activeContexts.containsKey(context.id().getId()) == false; - context.indexShard().getSearchOperationListener().onFreeContext(context); - if (context.scrollContext() != null) { - context.indexShard().getSearchOperationListener().onFreeScrollContext(context); + public void freeAllScrollContexts() { + for (ReaderContext readerContext : activeReaders.values()) { + if (readerContext.scrollContext() != null) { + freeReaderContext(readerContext.id()); + } } } - public void freeAllScrollContexts() { - for (SearchContext searchContext : activeContexts.values()) { - if (searchContext.scrollContext() != null) { - freeContext(searchContext.id()); - } + private long getKeepAlive(ShardSearchRequest request) { + if (request.scroll() != null && request.scroll().keepAlive() != null) { + return request.scroll().keepAlive().millis(); + } else { + return defaultKeepAlive; } } - private void contextScrollKeepAlive(SearchContext context, long keepAlive) { + private void 
checkKeepAliveLimit(long keepAlive) { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( - "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive) + ") is too large. " + + "Keep alive for request (" + TimeValue.timeValueMillis(keepAlive) + ") is too large. " + "It must be less than (" + TimeValue.timeValueMillis(maxKeepAlive) + "). " + "This limit can be set by changing the [" + MAX_KEEPALIVE_SETTING.getKey() + "] cluster level setting."); } - context.keepAlive(keepAlive); - } - - private void contextProcessing(SearchContext context) { - // disable timeout while executing a search - context.accessed(-1); - } - - private void contextProcessedSuccessfully(SearchContext context) { - context.accessed(threadPool.relativeTimeInMillis()); } - private void cleanContext(SearchContext context) { - try { - context.clearReleasables(Lifetime.PHASE); - context.setTask(null); - } finally { - context.decRef(); + private void processFailure(ShardSearchRequest request, ReaderContext context, Exception e) { + if (context.singleSession() || request.scroll() != null) { + // we release the reader on failure if the request is a normal search or a scroll + freeReaderContext(context.id()); } - } - - private void processFailure(SearchContext context, Exception e) { - freeContext(context.id()); try { if (Lucene.isCorruptionException(e)) { context.indexShard().failShard("search execution corruption failure", e); @@ -838,7 +876,7 @@ private void processFailure(SearchContext context, Exception e) { } } - private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchException { + private void parseSource(DefaultSearchContext context, SearchSourceBuilder source, boolean includeAggregations) { // nothing to parse... if (source == null) { return; @@ -894,7 +932,7 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.timeout(source.timeout()); } context.terminateAfter(source.terminateAfter()); - if (source.aggregations() != null) { + if (source.aggregations() != null && includeAggregations) { try { AggregatorFactories factories = source.aggregations().build(queryShardContext, null); context.aggregations(new SearchContextAggregations(factories, multiBucketConsumerService.create())); @@ -1067,14 +1105,10 @@ private void shortcutDocIdsToLoad(SearchContext context) { context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); } - private void processScroll(InternalScrollSearchRequest request, SearchContext context) { + private void processScroll(InternalScrollSearchRequest request, ReaderContext reader, SearchContext context) { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); - // update the context keep alive based on the new scroll value - if (request.scroll() != null && request.scroll().keepAlive() != null) { - contextScrollKeepAlive(context, request.scroll().keepAlive().millis()); - } } /** @@ -1082,7 +1116,7 @@ private void processScroll(InternalScrollSearchRequest request, SearchContext co * SearchService */ public int getActiveContexts() { - return this.activeContexts.size(); + return this.activeReaders.size(); } public ResponseCollectorService getResponseCollectorService() { @@ -1092,18 +1126,10 @@ public ResponseCollectorService getResponseCollectorService() { class Reaper implements Runnable { @Override public void run() { - final long time = threadPool.relativeTimeInMillis(); - for (SearchContext context : activeContexts.values()) { - // Use the same 
value for both checks since lastAccessTime can - be modified by another thread between checks! - final long lastAccessTime = context.lastAccessTime(); - if (lastAccessTime == -1L) { // it's being processed or timeout is disabled - continue; - } - if ((time - lastAccessTime > context.keepAlive())) { - logger.debug("freeing search context [{}], time [{}], lastAccessTime [{}], keepAlive [{}]", context.id(), time, - lastAccessTime, context.keepAlive()); - freeContext(context.id()); + for (ReaderContext context : activeReaders.values()) { + if (context.isExpired()) { + logger.debug("freeing search context [{}]", context.id()); + freeReaderContext(context.id()); } } } @@ -1114,19 +1140,33 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set<String> - * This method can have false positives while if it returns <code>false</code> the query won't match any documents on the current - * shard. + * This method uses a lightweight searcher without wrapping (i.e., it does not open a full reader on frozen indices) to rewrite + * the query and check whether it can match any documents. This method can return false positives, but if it returns {@code false} + * the query won't match any documents on the current shard. */ public CanMatchResponse canMatch(ShardSearchRequest request) throws IOException { assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType(); - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().getId()); - // we don't want to use the reader wrapper since it could run costly operations - // and we can afford false positives. - final boolean hasRefreshPending = indexShard.hasRefreshPending(); - try (Engine.Searcher searcher = indexShard.acquireCanMatchSearcher()) { - QueryShardContext context = indexService.newQueryShardContext(request.shardId().id(), searcher, + final ReaderContext readerContext = request.readerId() != null ? getReaderContext(request.readerId()) : null; + final Releasable markAsUsed = readerContext != null ?
readerContext.markAsUsed() : null; + final IndexService indexService; + final Engine.Searcher canMatchSearcher; + final boolean hasRefreshPending; + if (readerContext != null) { + readerContext.indexShard().getSearchOperationListener().validateSearchContext(readerContext, request); + checkKeepAliveLimit(request.keepAlive().millis()); + readerContext.keepAlive(request.keepAlive().millis()); + indexService = readerContext.indexService(); + canMatchSearcher = readerContext.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); + hasRefreshPending = false; + } else { + indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + hasRefreshPending = indexShard.hasRefreshPending(); + canMatchSearcher = indexShard.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE); + } + + try (markAsUsed; canMatchSearcher) { + QueryShardContext context = indexService.newQueryShardContext(request.shardId().id(), canMatchSearcher, request::nowInMillis, request.getClusterAlias()); Rewriteable.rewrite(request.getRewriteable(), context, false); final boolean aliasFilterCanMatch = request.getAliasFilter() @@ -1142,6 +1182,7 @@ public CanMatchResponse canMatch(ShardSearchRequest request) throws IOException canMatch = aliasFilterCanMatch; } return new CanMatchResponse(canMatch || hasRefreshPending, minMax); + } } @@ -1167,10 +1208,14 @@ public static boolean canRewriteToMatchNone(SearchSourceBuilder source) { } private void rewriteAndFetchShardRequest(IndexShard shard, ShardSearchRequest request, ActionListener listener) { - ActionListener actionListener = ActionListener.wrap(r -> - // now we need to check if there is a pending refresh and register - shard.awaitShardSearchActive(b -> listener.onResponse(request)), - listener::onFailure); + ActionListener actionListener = ActionListener.wrap(r -> { + if (request.readerId() != null) { + listener.onResponse(request); + } else { + // now we need to check if there is a pending refresh and register + shard.awaitShardSearchActive(b -> listener.onResponse(request)); + } + }, listener::onFailure); // we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here for BWC as well as // AliasFilters that might need to be rewritten. 
These are edge-cases but we are very efficient doing the rewrite here so it's not // adding a lot of overhead diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index ffbbdf589e375..804b67e11c725 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -115,8 +115,6 @@ public void execute(SearchContext context) { context.searcher().search(query, collector); } catch (Exception e) { throw new QueryPhaseExecutionException(context.shardTarget(), "Failed to execute global aggregators", e); - } finally { - context.clearReleasables(SearchContext.Lifetime.COLLECTION); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 0727cb5a24dc4..ff9785dde47b1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -27,7 +27,6 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.query.QueryPhaseExecutionException; import java.io.IOException; @@ -77,7 +76,7 @@ protected AggregatorBase(String name, AggregatorFactories factories, SearchConte this.breakerService = context.bigArrays().breakerService(); assert factories != null : "sub-factories provided to BucketAggregator must not be null, use AggregatorFactories.EMPTY instead"; this.subAggregators = factories.createSubAggregators(context, this, subAggregatorCardinality); - context.addReleasable(this, Lifetime.PHASE); + context.addReleasable(this); final SearchShardTarget shardTarget = context.shardTarget(); // Register a safeguard to highlight any invalid construction logic (call to this constructor without subsequent preCollection call) collectableSubAggregators = new BucketCollector() { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index d0fa5052bde07..d7c3311579b55 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -109,6 +110,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField SEARCH_AFTER = new ParseField("search_after"); public static final ParseField COLLAPSE = new ParseField("collapse"); public static final ParseField SLICE = new ParseField("slice"); + public static final ParseField POINT_IN_TIME = new ParseField("pit"); public static SearchSourceBuilder fromXContent(XContentParser parser) throws IOException { return
fromXContent(parser, true); @@ -188,6 +190,8 @@ public static HighlightBuilder highlight() { private CollapseBuilder collapse = null; + private PointInTimeBuilder pointInTimeBuilder = null; + /** * Constructs a new search source builder. */ @@ -242,12 +246,14 @@ public SearchSourceBuilder(StreamInput in) throws IOException { sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); trackTotalHitsUpTo = in.readOptionalInt(); - if (in.getVersion().onOrAfter(Version.V_7_10_0)) { if (in.readBoolean()) { fetchFields = in.readList(FieldAndFormat::new); } } + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + pointInTimeBuilder = in.readOptionalWriteable(PointInTimeBuilder::new); + } } @Override @@ -302,13 +308,15 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sliceBuilder); out.writeOptionalWriteable(collapse); out.writeOptionalInt(trackTotalHitsUpTo); - if (out.getVersion().onOrAfter(Version.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { out.writeList(fetchFields); } } + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalWriteable(pointInTimeBuilder); + } } /** @@ -955,6 +963,21 @@ public boolean isSuggestOnly() { && queryBuilder == null && aggregations == null; } + /** + * Returns the point in time that is configured with this query + */ + public PointInTimeBuilder pointInTimeBuilder() { + return pointInTimeBuilder; + } + + /** + * Specify a point in time that this query should execute against. + */ + public SearchSourceBuilder pointInTimeBuilder(PointInTimeBuilder builder) { + this.pointInTimeBuilder = builder; + return this; + } + /** * Rewrites this search source builder into its primitive form. e.g. by * rewriting the QueryBuilder. 
If the builder did not change the identity @@ -1040,6 +1063,7 @@ private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder rewrittenBuilder.version = version; rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; rewrittenBuilder.collapse = collapse; + rewrittenBuilder.pointInTimeBuilder = pointInTimeBuilder; return rewrittenBuilder; } @@ -1148,6 +1172,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th sliceBuilder = SliceBuilder.fromXContent(parser); } else if (COLLAPSE.match(currentFieldName, parser.getDeprecationHandler())) { collapse = CollapseBuilder.fromXContent(parser); + } else if (POINT_IN_TIME.match(currentFieldName, parser.getDeprecationHandler())) { + pointInTimeBuilder = PointInTimeBuilder.fromXContent(parser); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); @@ -1352,6 +1378,9 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t if (collapse != null) { builder.field(COLLAPSE.getPreferredName(), collapse); } + if (pointInTimeBuilder != null) { + builder.field(POINT_IN_TIME.getPreferredName(), pointInTimeBuilder); + } return builder; } @@ -1564,7 +1593,7 @@ public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, fetchFields, docValueFields, storedFieldsContext, from, highlightBuilder, indexBoosts, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, - seqNoAndPrimaryTerm, profile, extBuilders, collapse, trackTotalHitsUpTo); + seqNoAndPrimaryTerm, profile, extBuilders, collapse, trackTotalHitsUpTo, pointInTimeBuilder); } @Override @@ -1604,7 +1633,8 @@ public boolean equals(Object obj) { && Objects.equals(profile, other.profile) && Objects.equals(extBuilders, other.extBuilders) && Objects.equals(collapse, other.collapse) - && Objects.equals(trackTotalHitsUpTo, other.trackTotalHitsUpTo); + && Objects.equals(trackTotalHitsUpTo, other.trackTotalHitsUpTo) + && Objects.equals(pointInTimeBuilder, other.pointInTimeBuilder); } @Override @@ -1619,4 +1649,81 @@ public String toString(Params params) { throw new ElasticsearchException(e); } } + + /** + * Specify whether this search should use specific reader contexts instead of the latest ones. 
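A hedged usage sketch for the `PointInTimeBuilder` defined next: the pit id is an abbreviated placeholder, and `QueryBuilders` is the standard query DSL helper.

```
// Execute a query against an already-opened point in time rather than the
// latest reader; the keep_alive extends the reader's lifetime on each use.
SearchSourceBuilder source = new SearchSourceBuilder()
    .query(QueryBuilders.matchQuery("title", "elasticsearch"))
    .pointInTimeBuilder(new SearchSourceBuilder.PointInTimeBuilder(
        "46ToAwMDaWR4...", TimeValue.timeValueMinutes(1)));
```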
+ */ + public static final class PointInTimeBuilder implements Writeable, ToXContentObject { + private static final ParseField ID_FIELD = new ParseField("id"); + private static final ParseField KEEP_ALIVE_FIELD = new ParseField("keep_alive"); + private static final ObjectParser PARSER; + + static { + PARSER = new ObjectParser<>(POINT_IN_TIME.getPreferredName(), XContentParams::new); + PARSER.declareString((params, id) -> params.id = id, ID_FIELD); + PARSER.declareField((params, keepAlive) -> params.keepAlive = keepAlive, + (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE_FIELD.getPreferredName()), + KEEP_ALIVE_FIELD, ObjectParser.ValueType.STRING); + } + + private static final class XContentParams { + private String id; + private TimeValue keepAlive; + } + + private final String id; + private final TimeValue keepAlive; + + public PointInTimeBuilder(String id, TimeValue keepAlive) { + this.id = Objects.requireNonNull(id); + this.keepAlive = Objects.requireNonNull(keepAlive); + } + + public PointInTimeBuilder(StreamInput in) throws IOException { + id = in.readString(); + keepAlive = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeTimeValue(keepAlive); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(ID_FIELD.getPreferredName(), id); + builder.field(KEEP_ALIVE_FIELD.getPreferredName(), keepAlive); + return builder; + } + + public static PointInTimeBuilder fromXContent(XContentParser parser) throws IOException { + final XContentParams params = PARSER.parse(parser, null); + if (params.id == null || params.keepAlive == null) { + throw new IllegalArgumentException("id and keep_alive must be specified"); + } + return new PointInTimeBuilder(params.id, params.keepAlive); + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + public String getId() { + return id; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final PointInTimeBuilder that = (PointInTimeBuilder) o; + return Objects.equals(id, that.id) && Objects.equals(keepAlive, that.keepAlive); + } + + @Override + public int hashCode() { + return Objects.hash(id, keepAlive); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 5f931d661674e..a7a3ff7085d2e 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -25,12 +25,14 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import java.io.IOException; @@ -45,7 +47,7 @@ public class DfsSearchResult extends SearchPhaseResult { public DfsSearchResult(StreamInput in) throws IOException { super(in); - contextId = new 
SearchContextId(in); + contextId = new ShardSearchContextId(in); int termsSize = in.readVInt(); if (termsSize == 0) { terms = EMPTY_TERMS; @@ -59,11 +61,15 @@ public DfsSearchResult(StreamInput in) throws IOException { fieldStatistics = readFieldStats(in); maxDoc = in.readVInt(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); + } } - public DfsSearchResult(SearchContextId contextId, SearchShardTarget shardTarget) { + public DfsSearchResult(ShardSearchContextId contextId, SearchShardTarget shardTarget, ShardSearchRequest shardSearchRequest) { this.setSearchShardTarget(shardTarget); this.contextId = contextId; + setShardSearchRequest(shardSearchRequest); } public DfsSearchResult maxDoc(int maxDoc) { @@ -98,7 +104,7 @@ public ObjectObjectHashMap fieldStatistics() { return fieldStatistics; } - @Override + @Override public void writeTo(StreamOutput out) throws IOException { contextId.writeTo(out); out.writeVInt(terms.length); @@ -109,6 +115,9 @@ public void writeTo(StreamOutput out) throws IOException { writeTermStats(out, termStatistics); writeFieldStats(out, fieldStatistics); out.writeVInt(maxDoc); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalWriteable(getShardSearchRequest()); + } } public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap rescore() { return in.rescore(); } - @Override - public void addRescore(RescoreContext rescore) { - in.addRescore(rescore); - } - @Override public boolean hasScriptFields() { return in.hasScriptFields(); @@ -450,26 +435,6 @@ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int return in.docIdsToLoad(docIdsToLoad, docsIdsToLoadFrom, docsIdsToLoadSize); } - @Override - public void accessed(long accessTime) { - in.accessed(accessTime); - } - - @Override - public long lastAccessTime() { - return in.lastAccessTime(); - } - - @Override - public long keepAlive() { - return in.keepAlive(); - } - - @Override - public void keepAlive(long keepAlive) { - in.keepAlive(keepAlive); - } - @Override public DfsSearchResult dfsResult() { return in.dfsResult(); @@ -552,4 +517,14 @@ public SearchContext collapse(CollapseContext collapse) { public CollapseContext collapse() { return in.collapse(); } + + @Override + public void addRescore(RescoreContext rescore) { + in.addRescore(rescore); + } + + @Override + public ReaderContext readerContext() { + return in.readerContext(); + } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java index ff86e44c1704b..f522d7760b2f7 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/InternalScrollSearchRequest.java @@ -33,21 +33,21 @@ public class InternalScrollSearchRequest extends TransportRequest { - private SearchContextId contextId; + private ShardSearchContextId contextId; private Scroll scroll; public InternalScrollSearchRequest() { } - public InternalScrollSearchRequest(SearchScrollRequest request, SearchContextId contextId) { + public InternalScrollSearchRequest(SearchScrollRequest request, ShardSearchContextId contextId) { this.contextId = contextId; this.scroll = request.scroll(); } public InternalScrollSearchRequest(StreamInput in) throws IOException { super(in); - contextId = new SearchContextId(in); + contextId = new 
ShardSearchContextId(in); scroll = in.readOptionalWriteable(Scroll::new); } @@ -58,7 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(scroll); } - public SearchContextId contextId() { + public ShardSearchContextId contextId() { return contextId; } diff --git a/server/src/main/java/org/elasticsearch/search/internal/LegacyReaderContext.java b/server/src/main/java/org/elasticsearch/search/internal/LegacyReaderContext.java new file mode 100644 index 0000000000000..1c3c14ab14d38 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/internal/LegacyReaderContext.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.search.RescoreDocIds; +import org.elasticsearch.search.dfs.AggregatedDfs; + +import java.util.Objects; + +public class LegacyReaderContext extends ReaderContext { + private final ShardSearchRequest shardSearchRequest; + private final ScrollContext scrollContext; + private AggregatedDfs aggregatedDfs; + private RescoreDocIds rescoreDocIds; + + private Engine.Searcher searcher; + private Releasable onClose; + + public LegacyReaderContext(long id, IndexService indexService, IndexShard indexShard, Engine.SearcherSupplier reader, + ShardSearchRequest shardSearchRequest, long keepAliveInMillis) { + super(id, indexService, indexShard, reader, keepAliveInMillis, false); + assert shardSearchRequest.readerId() == null; + assert shardSearchRequest.keepAlive() == null; + this.shardSearchRequest = Objects.requireNonNull(shardSearchRequest); + if (shardSearchRequest.scroll() != null) { + this.scrollContext = new ScrollContext(); + } else { + this.scrollContext = null; + } + } + + @Override + public Engine.Searcher acquireSearcher(String source) { + if (scrollContext != null && "search".equals(source)) { + // Search scroll requests are special, they don't hold indices names so we have + // to reuse the searcher created on the request that initialized the scroll. + // This ensures that we wrap the searcher's reader with the user's permissions + // when they are available. 
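The reuse logic that follows in `acquireSearcher`, reduced to its plain-Java shape (a sketch of the idiom, not ES API): open the real searcher once, make each scroll round's close() a no-op, and release only when the owning context closes.

```
import java.util.function.Supplier;

final class ScrollSearcherCache implements AutoCloseable {
    private final Supplier<AutoCloseable> opener;
    private AutoCloseable searcher;            // held for the life of the scroll

    ScrollSearcherCache(Supplier<AutoCloseable> opener) {
        this.opener = opener;
    }

    AutoCloseable searcherForRound() {
        if (searcher == null) {
            searcher = opener.get();           // the first round opens it
        }
        return () -> { /* no-op: only the owning context releases it */ };
    }

    @Override
    public void close() throws Exception {     // the doClose() counterpart
        if (searcher != null) {
            searcher.close();
        }
    }
}
```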
+ if (searcher == null) { + Engine.Searcher delegate = searcherSupplier.acquireSearcher(source); + onClose = delegate::close; + searcher = new Engine.Searcher(delegate.source(), delegate.getDirectoryReader(), + delegate.getSimilarity(), delegate.getQueryCache(), delegate.getQueryCachingPolicy(), () -> {}); + } + return searcher; + } + return super.acquireSearcher(source); + } + + + @Override + void doClose() { + Releasables.close(onClose, super::doClose); + } + + @Override + public ShardSearchRequest getShardSearchRequest(ShardSearchRequest other) { + return shardSearchRequest; + } + + @Override + public ScrollContext scrollContext() { + return scrollContext; + } + + @Override + public AggregatedDfs getAggregatedDfs(AggregatedDfs other) { + return aggregatedDfs; + } + + @Override + public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { + this.aggregatedDfs = aggregatedDfs; + } + + @Override + public RescoreDocIds getRescoreDocIds(RescoreDocIds other) { + return rescoreDocIds; + } + + @Override + public void setRescoreDocIds(RescoreDocIds rescoreDocIds) { + this.rescoreDocIds = rescoreDocIds; + } + + @Override + public boolean singleSession() { + return scrollContext == null || scrollContext.scroll == null; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/internal/ReaderContext.java b/server/src/main/java/org/elasticsearch/search/internal/ReaderContext.java new file mode 100644 index 0000000000000..507a2c1fbb4d4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/internal/ReaderContext.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.internal; + +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.search.RescoreDocIds; +import org.elasticsearch.search.dfs.AggregatedDfs; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Holds a reference to a point in time {@link Engine.Searcher} that will be used to construct {@link SearchContext}. + * This class also implements {@link org.elasticsearch.common.util.concurrent.RefCounted} since in some situations like + * in {@link org.elasticsearch.search.SearchService} a SearchContext can be closed concurrently due to independent events + * ie. when an index gets removed. 
To prevent accessing closed IndexReader / IndexSearcher instances the SearchContext + * can be guarded by a reference count and fail if it's been closed by an external event. + */ +public class ReaderContext implements Releasable { + private final ShardSearchContextId id; + private final IndexService indexService; + private final IndexShard indexShard; + protected final Engine.SearcherSupplier searcherSupplier; + private final AtomicBoolean closed = new AtomicBoolean(false); + private final boolean singleSession; + + private final AtomicLong keepAlive; + private final AtomicLong lastAccessTime; + // For reference why we use RefCounted here see https://github.com/elastic/elasticsearch/pull/20095. + private final AbstractRefCounted refCounted; + + private final List onCloses = new CopyOnWriteArrayList<>(); + + private final long startTimeInNano = System.nanoTime(); + + private Map context; + + public ReaderContext(long id, + IndexService indexService, + IndexShard indexShard, + Engine.SearcherSupplier searcherSupplier, + long keepAliveInMillis, + boolean singleSession) { + this.id = new ShardSearchContextId(UUIDs.base64UUID(), id); + this.indexService = indexService; + this.indexShard = indexShard; + this.searcherSupplier = searcherSupplier; + this.singleSession = singleSession; + this.keepAlive = new AtomicLong(keepAliveInMillis); + this.lastAccessTime = new AtomicLong(nowInMillis()); + this.refCounted = new AbstractRefCounted("reader_context") { + @Override + protected void closeInternal() { + doClose(); + } + }; + } + + private long nowInMillis() { + return indexShard.getThreadPool().relativeTimeInMillis(); + } + + @Override + public final void close() { + if (closed.compareAndSet(false, true)) { + refCounted.decRef(); + } + } + + void doClose() { + Releasables.close(Releasables.wrap(onCloses), searcherSupplier); + } + + public void addOnClose(Releasable releasable) { + onCloses.add(releasable); + } + + public ShardSearchContextId id() { + return id; + } + + public IndexService indexService() { + return indexService; + } + + public IndexShard indexShard() { + return indexShard; + } + + public Engine.Searcher acquireSearcher(String source) { + return searcherSupplier.acquireSearcher(source); + } + + public void keepAlive(long keepAlive) { + this.keepAlive.updateAndGet(curr -> Math.max(curr, keepAlive)); + } + + /** + * Marks this reader as being used so its time to live should not be expired. 
+ * + * @return a releasable to indicate the caller has stopped using this reader + */ + public Releasable markAsUsed() { + refCounted.incRef(); + return Releasables.releaseOnce(() -> { + this.lastAccessTime.updateAndGet(curr -> Math.max(curr, nowInMillis())); + refCounted.decRef(); + }); + } + + public boolean isExpired() { + if (refCounted.refCount() > 1) { + return false; // being used by markAsUsed + } + final long elapsed = nowInMillis() - lastAccessTime.get(); + return elapsed > keepAlive.get(); + } + + // BWC + public ShardSearchRequest getShardSearchRequest(ShardSearchRequest other) { + return Objects.requireNonNull(other); + } + + public ScrollContext scrollContext() { + return null; + } + + public AggregatedDfs getAggregatedDfs(AggregatedDfs other) { + return other; + } + + public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { + + } + + public RescoreDocIds getRescoreDocIds(RescoreDocIds other) { + return Objects.requireNonNull(other); + } + + public void setRescoreDocIds(RescoreDocIds rescoreDocIds) { + + } + + /** + * Returns {@code true} for readers that are intended to be used in a single query. For readers that are intended + * to be used in multiple queries (i.e., scroll or point-in-time readers), we should not release them after the fetch phase + * or the query phase with empty results. + */ + public boolean singleSession() { + return singleSession; + } + + /** + * Returns the object or null if the given key does not have a + * value in the context + */ + @SuppressWarnings("unchecked") // (T)object + public <T> T getFromContext(String key) { + return context != null ?
(T) context.get(key) : null; - } - - /** - * Puts the object into the context - */ - public void putInContext(String key, Object value) { - if (context == null) { - context = new HashMap<>(); - } - context.put(key, value); - } } diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index c4a42acf980df..3bbf3ea387f29 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -29,9 +29,6 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractRefCounted; -import org.elasticsearch.common.util.concurrent.RefCounted; -import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.MappedFieldType; @@ -41,6 +38,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; @@ -61,37 +59,30 @@ import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; -import java.util.ArrayList; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; /** * This class encapsulates the state needed to execute a search. It holds a reference to the * shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on * state from one query / fetch phase to another. - * - * This class also implements {@link RefCounted} since in some situations like in {@link org.elasticsearch.search.SearchService} - * a SearchContext can be closed concurrently due to independent events ie. when an index gets removed. To prevent accessing closed - * IndexReader / IndexSearcher instances the SearchContext can be guarded by a reference count and fail if it's been closed by - * an external event. 
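That removed guard now lives on `ReaderContext` (see `markAsUsed` and `isExpired` earlier in this patch). A sketch of how a phase pins a reader so the reaper cannot free it mid-request; `readerContext` is assumed in scope:

```
// While the Releasable is held, refCount > 1, so isExpired() returns false.
// Releasing it bumps lastAccessTime, restarting the keep_alive countdown.
try (Releasable pinned = readerContext.markAsUsed();
     Engine.Searcher searcher = readerContext.acquireSearcher("search")) {
    // run the query or fetch phase against this point-in-time searcher
}
```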
*/ -// For reference why we use RefCounted here see #20095 -public abstract class SearchContext extends AbstractRefCounted implements Releasable { +public abstract class SearchContext implements Releasable { public static final int DEFAULT_TERMINATE_AFTER = 0; public static final int TRACK_TOTAL_HITS_ACCURATE = Integer.MAX_VALUE; public static final int TRACK_TOTAL_HITS_DISABLED = -1; public static final int DEFAULT_TRACK_TOTAL_HITS_UP_TO = 10000; - private Map> clearables = null; + private final List releasables = new CopyOnWriteArrayList<>(); private final AtomicBoolean closed = new AtomicBoolean(false); private InnerHitsContext innerHitsContext; - protected SearchContext() { - super("search_context"); - } + protected SearchContext() {} public abstract void setTask(SearchShardTask task); @@ -101,25 +92,15 @@ protected SearchContext() { @Override public final void close() { - if (closed.compareAndSet(false, true)) { // prevent double closing - decRef(); - } - } - - @Override - protected final void closeInternal() { - try { - clearReleasables(Lifetime.CONTEXT); - } finally { - doClose(); + if (closed.compareAndSet(false, true)) { + try { + Releasables.close(releasables); + } finally { + doClose(); + } } } - @Override - protected void alreadyClosed() { - throw new IllegalStateException("search context is already closed can't increment refCount current count [" + refCount() + "]"); - } - protected abstract void doClose(); /** @@ -132,7 +113,7 @@ protected void alreadyClosed() { * alias filters, types filters, etc. */ public abstract Query buildFilteredQuery(Query query); - public abstract SearchContextId id(); + public abstract ShardSearchContextId id(); public abstract String source(); @@ -146,12 +127,8 @@ protected void alreadyClosed() { public abstract float queryBoost(); - public abstract long getOriginNanoTime(); - public abstract ScrollContext scrollContext(); - public abstract SearchContext scrollContext(ScrollContext scroll); - public abstract SearchContextAggregations aggregations(); public abstract SearchContext aggregations(SearchContextAggregations aggregations); @@ -182,6 +159,36 @@ public InnerHitsContext innerHits() { public abstract void addRescore(RescoreContext rescore); + public final RescoreDocIds rescoreDocIds() { + final List rescore = rescore(); + if (rescore == null) { + return RescoreDocIds.EMPTY; + } + Map> rescoreDocIds = null; + for (int i = 0; i < rescore.size(); i++) { + final Set docIds = rescore.get(i).getRescoredDocs(); + if (docIds != null && docIds.isEmpty() == false) { + if (rescoreDocIds == null) { + rescoreDocIds = new HashMap<>(); + } + rescoreDocIds.put(i, docIds); + } + } + return rescoreDocIds == null ? 
RescoreDocIds.EMPTY : new RescoreDocIds(rescoreDocIds); + } + + public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) { + final List<RescoreContext> rescore = rescore(); + if (rescore != null) { + for (int i = 0; i < rescore.size(); i++) { + final Set<Integer> docIds = rescoreDocIds.getId(i); + if (docIds != null) { + rescore.get(i).setRescoredDocs(docIds); + } + } + } + } + public abstract boolean hasScriptFields(); public abstract ScriptFieldsContext scriptFields(); @@ -332,14 +339,6 @@ public InnerHitsContext innerHits() { public abstract SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int docsIdsToLoadSize); - public abstract void accessed(long accessTime); - - public abstract long lastAccessTime(); - - public abstract long keepAlive(); - - public abstract void keepAlive(long keepAlive); - public abstract DfsSearchResult dfsResult(); public abstract QuerySearchResult queryResult(); @@ -353,38 +352,14 @@ public InnerHitsContext innerHits() { */ public abstract Profilers getProfilers(); + /** - * Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object - * is a function of the provided {@link Lifetime}. + * Adds a releasable that will be freed when this context is closed. */ - public void addReleasable(Releasable releasable, Lifetime lifetime) { - if (clearables == null) { - clearables = new EnumMap<>(Lifetime.class); - } - List<Releasable> releasables = clearables.get(lifetime); - if (releasables == null) { - releasables = new ArrayList<>(); - clearables.put(lifetime, releasables); - } + public void addReleasable(Releasable releasable) { releasables.add(releasable); } - public void clearReleasables(Lifetime lifetime) { - if (clearables != null) { - List<List<Releasable>> releasables = new ArrayList<>(); - for (Lifetime lc : Lifetime.values()) { - if (lc.compareTo(lifetime) > 0) { - break; - } - List<Releasable> remove = clearables.remove(lc); - if (remove != null) { - releasables.add(remove); - } - } - Releasables.close(Iterables.flatten(releasables)); - } - } - /** * @return true if the request contains only suggest */ @@ -409,24 +384,6 @@ public final boolean hasOnlySuggest() { /** Return a view of the additional query collectors that should be run for this context. */ public abstract Map<Class<?>, Collector> queryCollectors(); - /** - * The life time of an object that is used during search execution. - */ - public enum Lifetime { - /** - * This life time is for objects that only live during collection time. - */ - COLLECTION, - /** - * This life time is for objects that need to live until the end of the current search phase. - */ - PHASE, - /** - * This life time is for objects that need to live until the search context they are attached to is destroyed.
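With the `Lifetime` enum going away (its removal completes just below), resource management collapses to one rule: everything registered on a context is released when that context closes. A before/after sketch with illustrative variable names:

```
// Before: callers chose a lifetime bucket per resource, e.g.
//   context.addReleasable(aggregator, Lifetime.PHASE);
// After: a single list, closed in SearchContext#close(); for example the
// reader lease taken in createSearchContext:
context.addReleasable(aggregator);
context.addReleasable(readerContext.markAsUsed());
```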
- */ - CONTEXT - } - public abstract QueryShardContext getQueryShardContext(); @Override @@ -445,4 +402,6 @@ public String toString() { result.append(" query=[").append(query()).append("]"); return result.toString(); } + + public abstract ReaderContext readerContext(); } diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContextId.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java similarity index 89% rename from server/src/main/java/org/elasticsearch/search/internal/SearchContextId.java rename to server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java index 38513dcc5b7d3..d9474a279aa0c 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java @@ -27,17 +27,16 @@ import java.io.IOException; import java.util.Objects; - -public final class SearchContextId implements Writeable { +public final class ShardSearchContextId implements Writeable { private final String readerId; private final long id; - public SearchContextId(String readerId, long id) { + public ShardSearchContextId(String readerId, long id) { this.readerId = Objects.requireNonNull(readerId); this.id = id; } - public SearchContextId(StreamInput in) throws IOException { + public ShardSearchContextId(StreamInput in) throws IOException { this.id = in.readLong(); if (in.getVersion().onOrAfter(Version.V_7_7_0)) { this.readerId = in.readString(); @@ -66,7 +65,7 @@ public long getId() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - SearchContextId other = (SearchContextId) o; + ShardSearchContextId other = (ShardSearchContextId) o; return id == other.id && readerId.equals(other.readerId); } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 1493dd26f976b..cc4660d6034bc 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -87,6 +88,8 @@ public class ShardSearchRequest extends TransportRequest implements IndicesReque //these are the only mutable fields, as they are subject to rewriting private AliasFilter aliasFilter; private SearchSourceBuilder source; + private final ShardSearchContextId readerId; + private final TimeValue keepAlive; public ShardSearchRequest(OriginalIndices originalIndices, SearchRequest searchRequest, @@ -97,6 +100,21 @@ public ShardSearchRequest(OriginalIndices originalIndices, long nowInMillis, @Nullable String clusterAlias, String[] indexRoutings) { + this(originalIndices, searchRequest, shardId, numberOfShards, aliasFilter, + indexBoost, nowInMillis, clusterAlias, indexRoutings, null, null); + } + + public ShardSearchRequest(OriginalIndices originalIndices, + SearchRequest searchRequest, + ShardId shardId, + int numberOfShards, + AliasFilter aliasFilter, + float indexBoost, + long nowInMillis, + @Nullable 
String clusterAlias, + String[] indexRoutings, + ShardSearchContextId readerId, + TimeValue keepAlive) { this(originalIndices, shardId, numberOfShards, @@ -110,7 +128,9 @@ public ShardSearchRequest(OriginalIndices originalIndices, searchRequest.preference(), searchRequest.scroll(), nowInMillis, - clusterAlias); + clusterAlias, + readerId, + keepAlive); // If allowPartialSearchResults is unset (i.e. null), the cluster-level default should have been substituted // at this stage. Any NPEs in the above are therefore an error in request preparation logic. assert searchRequest.allowPartialSearchResults() != null; @@ -120,7 +140,7 @@ public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { this(OriginalIndices.NONE, shardId, -1, SearchType.QUERY_THEN_FETCH, null, null, - aliasFilter, 1.0f, false, Strings.EMPTY_ARRAY, null, null, nowInMillis, null); + aliasFilter, 1.0f, false, Strings.EMPTY_ARRAY, null, null, nowInMillis, null, null, null); } private ShardSearchRequest(OriginalIndices originalIndices, @@ -136,7 +156,9 @@ private ShardSearchRequest(OriginalIndices originalIndices, String preference, Scroll scroll, long nowInMillis, - @Nullable String clusterAlias) { + @Nullable String clusterAlias, + ShardSearchContextId readerId, + TimeValue keepAlive) { this.shardId = shardId; this.numberOfShards = numberOfShards; this.searchType = searchType; @@ -151,6 +173,9 @@ private ShardSearchRequest(OriginalIndices originalIndices, this.nowInMillis = nowInMillis; this.clusterAlias = clusterAlias; this.originalIndices = originalIndices; + this.readerId = readerId; + this.keepAlive = keepAlive; + assert (readerId != null) == (keepAlive != null); } public ShardSearchRequest(StreamInput in) throws IOException { @@ -183,7 +208,15 @@ public ShardSearchRequest(StreamInput in) throws IOException { canReturnNullResponseIfMatchNoDocs = false; bottomSortValues = null; } + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.readerId = in.readOptionalWriteable(ShardSearchContextId::new); + this.keepAlive = in.readOptionalTimeValue(); + } else { + this.readerId = null; + this.keepAlive = null; + } originalIndices = OriginalIndices.readOriginalIndices(in); + assert (readerId != null) == (keepAlive != null); } public ShardSearchRequest(ShardSearchRequest clone) { @@ -203,6 +236,8 @@ public ShardSearchRequest(ShardSearchRequest clone) { this.canReturnNullResponseIfMatchNoDocs = clone.canReturnNullResponseIfMatchNoDocs; this.bottomSortValues = clone.bottomSortValues; this.originalIndices = clone.originalIndices; + this.readerId = clone.readerId; + this.keepAlive = clone.keepAlive; } @Override @@ -240,6 +275,10 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce out.writeBoolean(canReturnNullResponseIfMatchNoDocs); out.writeOptionalWriteable(bottomSortValues); } + if (out.getVersion().onOrAfter(Version.V_8_0_0) && asKey == false) { + out.writeOptionalWriteable(readerId); + out.writeOptionalTimeValue(keepAlive); + } } @Override @@ -343,6 +382,21 @@ public void canReturnNullResponseIfMatchNoDocs(boolean value) { private static final ThreadLocal<BytesStreamOutput> scratch = ThreadLocal.withInitial(BytesStreamOutput::new); + /** + * Returns a non-null value if this request should execute against a specific point-in-time reader; + * otherwise, it executes against the most up-to-date point-in-time reader. + */ + public ShardSearchContextId readerId() { + return readerId; + } + + /** + * Returns a non-null value specifying the time to live of the point-in-time reader that is used to execute this request.
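A sketch of how the paired fields are supplied when a search runs against a point in time; the surrounding variables are assumed from coordinating-node code, and the `assert (readerId != null) == (keepAlive != null)` above is what keeps the pair consistent:

```
// Both null for an ordinary search; both set when the request should run
// against an existing point-in-time reader and refresh its keep_alive.
ShardSearchRequest shardRequest = new ShardSearchRequest(originalIndices,
    searchRequest, shardId, numberOfShards, aliasFilter, 1.0f, nowInMillis,
    clusterAlias, indexRoutings, readerId, TimeValue.timeValueMinutes(1));
```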
+ */ + public TimeValue keepAlive() { + return keepAlive; + } + /** * Returns the cache key for this shard search request, based on its content */ diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 240d46c0292dc..03024966f7194 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -31,7 +31,6 @@ import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.elasticsearch.search.query.QuerySearchResult; -import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -87,11 +86,6 @@ public Query buildFilteredQuery(Query query) { throw new UnsupportedOperationException("this context should be read only"); } - @Override - public SearchContext scrollContext(ScrollContext scrollContext) { - throw new UnsupportedOperationException("Not supported"); - } - @Override public SearchContext aggregations(SearchContextAggregations aggregations) { throw new UnsupportedOperationException("Not supported"); @@ -112,11 +106,6 @@ public void suggest(SuggestionSearchContext suggest) { throw new UnsupportedOperationException("Not supported"); } - @Override - public void addRescore(RescoreContext rescore) { - throw new UnsupportedOperationException("Not supported"); - } - @Override public boolean hasScriptFields() { return scriptFields != null; @@ -345,16 +334,6 @@ public CollapseContext collapse() { return null; } - @Override - public void accessed(long accessTime) { - throw new UnsupportedOperationException("Not supported"); - } - - @Override - public void keepAlive(long keepAlive) { - throw new UnsupportedOperationException("Not supported"); - } - @Override public QuerySearchResult queryResult() { return querySearchResult; diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index a371adf5f04b0..fce89049fa0c1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -354,8 +354,6 @@ private static boolean searchWithCollector(SearchContext searchContext, ContextI throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded"); } queryResult.searchTimedOut(true); - } finally { - searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); } if (searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER && queryResult.terminatedEarly() == null) { queryResult.terminatedEarly(false); @@ -410,8 +408,6 @@ private static boolean searchWithCollectorManager(SearchContext searchContext, C throw new QueryPhaseExecutionException(searchContext.shardTarget(), "Time exceeded"); } searchContext.queryResult().searchTimedOut(true); - } finally { - searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION); } return false; // no rescoring when sorting by field } diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java index d85d6e674c634..efb9410b88a29 100644 --- 
a/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchRequest.java @@ -19,15 +19,18 @@ package org.elasticsearch.search.query; +import org.elasticsearch.Version; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchShardTask; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.dfs.AggregatedDfs; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportRequest; @@ -37,23 +40,29 @@ public class QuerySearchRequest extends TransportRequest implements IndicesRequest { - private final SearchContextId contextId; - + private final ShardSearchContextId contextId; private final AggregatedDfs dfs; - private final OriginalIndices originalIndices; + private final ShardSearchRequest shardSearchRequest; - public QuerySearchRequest(OriginalIndices originalIndices, SearchContextId contextId, AggregatedDfs dfs) { + public QuerySearchRequest(OriginalIndices originalIndices, ShardSearchContextId contextId, + ShardSearchRequest shardSearchRequest, AggregatedDfs dfs) { this.contextId = contextId; this.dfs = dfs; + this.shardSearchRequest = shardSearchRequest; this.originalIndices = originalIndices; } public QuerySearchRequest(StreamInput in) throws IOException { super(in); - contextId = new SearchContextId(in); + contextId = new ShardSearchContextId(in); dfs = new AggregatedDfs(in); originalIndices = OriginalIndices.readOriginalIndices(in); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.shardSearchRequest = in.readOptionalWriteable(ShardSearchRequest::new); + } else { + this.shardSearchRequest = null; + } } @Override @@ -62,9 +71,12 @@ public void writeTo(StreamOutput out) throws IOException { contextId.writeTo(out); dfs.writeTo(out); OriginalIndices.writeOriginalIndices(originalIndices, out); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalWriteable(shardSearchRequest); + } } - public SearchContextId contextId() { + public ShardSearchContextId contextId() { return contextId; } @@ -72,6 +84,11 @@ public AggregatedDfs dfs() { return dfs; } + @Nullable + public ShardSearchRequest shardSearchRequest() { + return shardSearchRequest; + } + @Override public String[] indices() { return originalIndices.indices(); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 2608a6105ea49..b91cd98786fd1 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -27,11 +27,13 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.RescoreDocIds; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import 
org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; @@ -80,15 +82,16 @@ public QuerySearchResult(StreamInput in) throws IOException { isNull = false; } if (isNull == false) { - SearchContextId id = new SearchContextId(in); + ShardSearchContextId id = new ShardSearchContextId(in); readFromWithId(id, in); } } - public QuerySearchResult(SearchContextId id, SearchShardTarget shardTarget) { - this.contextId = id; + public QuerySearchResult(ShardSearchContextId contextId, SearchShardTarget shardTarget, ShardSearchRequest shardSearchRequest) { + this.contextId = contextId; setSearchShardTarget(shardTarget); isNull = false; + setShardSearchRequest(shardSearchRequest); } private QuerySearchResult(boolean isNull) { @@ -313,7 +316,7 @@ public boolean hasSearchContext() { return hasScoreDocs || hasSuggestHits(); } - public void readFromWithId(SearchContextId id, StreamInput in) throws IOException { + public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOException { this.contextId = id; from = in.readVInt(); size = in.readVInt(); @@ -339,6 +342,10 @@ public void readFromWithId(SearchContextId id, StreamInput in) throws IOExceptio hasProfileResults = profileShardResults != null; serviceTimeEWMA = in.readZLong(); nodeQueueSize = in.readInt(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); + setRescoreDocIds(new RescoreDocIds(in)); + } } @Override @@ -376,6 +383,10 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeOptionalWriteable(profileShardResults); out.writeZLong(serviceTimeEWMA); out.writeInt(nodeQueueSize); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalWriteable(getShardSearchRequest()); + getRescoreDocIds().writeTo(out); + } } public TotalHits getTotalHits() { diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java index 4f44af6321791..ffdb2bd854cb6 100644 --- a/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java +++ b/server/src/main/java/org/elasticsearch/search/rescore/RescoreContext.java @@ -63,7 +63,11 @@ public void setRescoredDocs(Set docIds) { } public boolean isRescored(int docId) { - return rescoredDocs.contains(docId); + return rescoredDocs != null && rescoredDocs.contains(docId); + } + + public Set getRescoredDocs() { + return rescoredDocs; } /** diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 0321f247b0843..1e3f8dbc0a4b1 100644 --- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -57,7 +57,7 @@ import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; 
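
Because the shard no longer keeps state between the query and fetch phases, QuerySearchResult above now round-trips the ShardSearchRequest and the new RescoreDocIds on 8.0+ streams, and RescoreContext.isRescored gains a null guard. The stand-in below illustrates what RescoreDocIds records, namely which doc ids each rescorer touched, keyed by rescorer position; the class shape is illustrative, not the actual implementation.

```
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class RescoreDocIdsDemo {
    // rescorer position -> doc ids that rescorer actually touched
    private final Map<Integer, Set<Integer>> docIdsPerRescorer = new HashMap<>();

    void record(int rescorerIndex, Set<Integer> docIds) {
        docIdsPerRescorer.put(rescorerIndex, docIds);
    }

    // Mirrors the null-safe RescoreContext.isRescored fixed in this patch:
    boolean isRescored(int rescorerIndex, int docId) {
        Set<Integer> ids = docIdsPerRescorer.get(rescorerIndex);
        return ids != null && ids.contains(docId);
    }

    public static void main(String[] args) {
        RescoreDocIdsDemo ids = new RescoreDocIdsDemo();
        ids.record(0, Set.of(42, 84));
        System.out.println(ids.isRescored(0, 42)); // true
        System.out.println(ids.isRescored(1, 42)); // false: rescorer 1 recorded nothing
    }
}
```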
import org.elasticsearch.transport.RemoteTransportException; @@ -821,7 +821,7 @@ public void testFailureToAndFromXContentWithDetails() throws IOException { OriginalIndices.NONE)), new ShardSearchFailure(new RepositoryException("repository_g", "Repo"), new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62), null, OriginalIndices.NONE)), new ShardSearchFailure( - new SearchContextMissingException(new SearchContextId(UUIDs.randomBase64UUID(), 0L)), null) + new SearchContextMissingException(new ShardSearchContextId(UUIDs.randomBase64UUID(), 0L)), null) }; failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 9e2f0a6800446..21f50e89f8e5e 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -81,7 +81,7 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -355,7 +355,7 @@ public void testActionTransportException() throws IOException { } public void testSearchContextMissingException() throws IOException { - SearchContextId contextId = new SearchContextId(UUIDs.randomBase64UUID(), randomLong()); + ShardSearchContextId contextId = new ShardSearchContextId(UUIDs.randomBase64UUID(), randomLong()); Version version = VersionUtils.randomVersion(random()); SearchContextMissingException ex = serialize(new SearchContextMissingException(contextId), version); assertThat(ex.contextId().getId(), equalTo(contextId.getId())); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index b3787bfe1b64b..5f077cd3dcb4c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; @@ -58,7 +58,7 @@ public class AbstractSearchAsyncActionTests extends ESTestCase { private final List> resolvedNodes = new ArrayList<>(); - private final Set releasedContexts = new CopyOnWriteArraySet<>(); + private final Set releasedContexts = new CopyOnWriteArraySet<>(); private AbstractSearchAsyncAction createAction(SearchRequest request, ArraySearchPhaseResults results, @@ -113,7 +113,7 @@ long buildTookInMillis() { } @Override - public void sendReleaseSearchContext(SearchContextId contextId, Transport.Connection connection, + public void sendReleaseSearchContext(ShardSearchContextId 
contextId, Transport.Connection connection, OriginalIndices originalIndices) { releasedContexts.add(contextId); } @@ -163,12 +163,11 @@ public void testBuildShardSearchTransportRequest() { public void testBuildSearchResponse() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); + ArraySearchPhaseResults phaseResults = new ArraySearchPhaseResults<>(10); AbstractSearchAsyncAction action = createAction(searchRequest, - new ArraySearchPhaseResults<>(10), null, false, new AtomicLong()); - String scrollId = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); + phaseResults, null, false, new AtomicLong()); InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); - SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId, action.buildShardFailures()); - assertEquals(scrollId, searchResponse.getScrollId()); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, action.buildShardFailures(), null, null); assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); @@ -177,14 +176,12 @@ public void testBuildSearchResponse() { public void testBuildSearchResponseAllowPartialFailures() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - AbstractSearchAsyncAction action = createAction(searchRequest, - new ArraySearchPhaseResults<>(10), null, false, new AtomicLong()); + final ArraySearchPhaseResults queryResult = new ArraySearchPhaseResults<>(10); + AbstractSearchAsyncAction action = createAction(searchRequest, queryResult, null, false, new AtomicLong()); action.onShardFailure(0, new SearchShardTarget("node", new ShardId("index", "index-uuid", 0), null, OriginalIndices.NONE), new IllegalArgumentException()); - String scrollId = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); - SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, scrollId, action.buildShardFailures()); - assertEquals(scrollId, searchResponse.getScrollId()); + SearchResponse searchResponse = action.buildSearchResponse(internalSearchResponse, action.buildShardFailures(), null, null); assertSame(searchResponse.getAggregations(), internalSearchResponse.aggregations()); assertSame(searchResponse.getSuggest(), internalSearchResponse.suggest()); assertSame(searchResponse.getProfileResults(), internalSearchResponse.profile()); @@ -195,7 +192,7 @@ public void testSendSearchResponseDisallowPartialFailures() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); - Set requestIds = new HashSet<>(); + Set requestIds = new HashSet<>(); List> nodeLookups = new ArrayList<>(); int numFailures = randomIntBetween(1, 5); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, numFailures); @@ -207,7 +204,7 @@ public void testSendSearchResponseDisallowPartialFailures() { action.onShardFailure(i, new SearchShardTarget(failureNodeId, failureShardId, failureClusterAlias, OriginalIndices.NONE), new IllegalArgumentException()); } - action.sendSearchResponse(InternalSearchResponse.empty(), randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10)); + action.sendSearchResponse(InternalSearchResponse.empty(), phaseResults.results); assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException)exception.get(); assertEquals(0, searchPhaseExecutionException.getSuppressed().length); @@ -223,7 +220,7 @@ public void testOnPhaseFailure() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(false); AtomicReference exception = new AtomicReference<>(); ActionListener listener = ActionListener.wrap(response -> fail("onResponse should not be called"), exception::set); - Set requestIds = new HashSet<>(); + Set requestIds = new HashSet<>(); List> nodeLookups = new ArrayList<>(); ArraySearchPhaseResults phaseResults = phaseResults(requestIds, nodeLookups, 0); AbstractSearchAsyncAction action = createAction(searchRequest, phaseResults, listener, false, new AtomicLong()); @@ -266,14 +263,14 @@ public void testShardNotAvailableWithDisallowPartialFailures() { assertEquals(0, searchPhaseExecutionException.getSuppressed().length); } - private static ArraySearchPhaseResults phaseResults(Set contextIds, + private static ArraySearchPhaseResults phaseResults(Set contextIds, List> nodeLookups, int numFailures) { int numResults = randomIntBetween(1, 10); ArraySearchPhaseResults phaseResults = new ArraySearchPhaseResults<>(numResults + numFailures); for (int i = 0; i < numResults; i++) { - SearchContextId contextId = new SearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()); + ShardSearchContextId contextId = new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()); contextIds.add(contextId); SearchPhaseResult phaseResult = new PhaseResult(contextId); String resultClusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(5, 10); @@ -288,7 +285,7 @@ private static ArraySearchPhaseResults phaseResults(Set array = new AtomicArray<>(3); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), 1), node1); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 1), node1); testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), 12), node2); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 12), node2); testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), 42), node3); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 42), node3); testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); array.setOnce(0, testSearchPhaseResult1); array.setOnce(1, testSearchPhaseResult2); array.setOnce(2, testSearchPhaseResult3); AtomicInteger numFreed = new AtomicInteger(0); - String scrollId = TransportSearchHelper.buildScrollId(array, randomBoolean()); + String scrollId = TransportSearchHelper.buildScrollId(array, VersionUtils.randomVersion(random())); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); ActionListener listener = new LatchedActionListener<>(new ActionListener() { @@ -126,7 +127,7 @@ public void onFailure(Exception e) { SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override - public void sendFreeContext(Transport.Connection connection, SearchContextId contextId, + public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener) { nodesInvoked.add(connection.getNode()); boolean freed = randomBoolean(); @@ -138,7 +139,7 @@ public void sendFreeContext(Transport.Connection connection, SearchContextId con } @Override - Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { return new SearchAsyncActionTests.MockConnection(node); } }; @@ -159,13 +160,13 @@ public void testClearScrollIdsWithFailure() throws IOException, InterruptedExcep DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); AtomicArray array = new AtomicArray<>(3); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), 1), node1); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 1), node1); testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = - new SearchAsyncActionTests.TestSearchPhaseResult(new 
SearchContextId(UUIDs.randomBase64UUID(), 12), node2); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 12), node2); testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), 42), node3); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), 42), node3); testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); array.setOnce(0, testSearchPhaseResult1); array.setOnce(1, testSearchPhaseResult2); @@ -173,7 +174,7 @@ public void testClearScrollIdsWithFailure() throws IOException, InterruptedExcep AtomicInteger numFreed = new AtomicInteger(0); AtomicInteger numFailures = new AtomicInteger(0); AtomicInteger numConnectionFailures = new AtomicInteger(0); - String scrollId = TransportSearchHelper.buildScrollId(array, randomBoolean()); + String scrollId = TransportSearchHelper.buildScrollId(array, VersionUtils.randomVersion(random())); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); @@ -197,7 +198,7 @@ public void onFailure(Exception e) { SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override - public void sendFreeContext(Transport.Connection connection, SearchContextId contextId, + public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener) { nodesInvoked.add(connection.getNode()); boolean freed = randomBoolean(); @@ -217,7 +218,7 @@ public void sendFreeContext(Transport.Connection connection, SearchContextId con } @Override - Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { if (randomBoolean()) { numFailures.incrementAndGet(); numConnectionFailures.incrementAndGet(); diff --git a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 836f65d4a715a..e8c56140d331b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -25,7 +25,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -61,7 +61,8 @@ public void testCollect() throws InterruptedException { case 1: state.add(1); executor.execute(() -> { - DfsSearchResult dfsSearchResult = new DfsSearchResult(new SearchContextId(UUIDs.randomBase64UUID(), shardID), null); + DfsSearchResult dfsSearchResult = new DfsSearchResult( + new ShardSearchContextId(UUIDs.randomBase64UUID(), shardID), null, null); dfsSearchResult.setShardIndex(shardID); dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), null, OriginalIndices.NONE)); diff --git 
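
The ClearScrollControllerTests changes above exercise freeing contexts on several nodes where any node may decline to free and any connection may fail outright, without hanging the request. Here is a compilable sketch of that fan-out-and-tally pattern; the Node interface and raw threads are mine, while the real code goes through SearchTransportService with ActionListener callbacks.

```
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ClearContextsFanOutDemo {
    interface Node { boolean freeContext(long contextId) throws Exception; }

    // Ask every node to free its context; count successes and failures, and
    // complete only when all callbacks, success or not, have come back.
    static void clearAll(List<Node> nodes, long contextId) throws InterruptedException {
        AtomicInteger freed = new AtomicInteger();
        AtomicInteger failures = new AtomicInteger();
        CountDownLatch latch = new CountDownLatch(nodes.size());
        for (Node node : nodes) {
            new Thread(() -> {
                try {
                    if (node.freeContext(contextId)) {
                        freed.incrementAndGet();
                    }
                } catch (Exception e) {
                    failures.incrementAndGet(); // a failed node must not hang the request
                } finally {
                    latch.countDown();
                }
            }).start();
        }
        latch.await();
        System.out.println("freed=" + freed.get() + " failures=" + failures.get());
    }

    public static void main(String[] args) throws InterruptedException {
        clearAll(List.of(id -> true, id -> true, id -> { throw new Exception("connection"); }), 42L);
    }
}
```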
a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index dd7ca786c7738..140d28c47fd9a 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -25,7 +25,6 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.index.shard.ShardId; @@ -33,7 +32,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.test.ESTestCase; @@ -46,8 +45,8 @@ public class DfsQueryPhaseTests extends ESTestCase { - private static DfsSearchResult newSearchResult(int shardIndex, SearchContextId contextId, SearchShardTarget target) { - DfsSearchResult result = new DfsSearchResult(contextId, target); + private static DfsSearchResult newSearchResult(int shardIndex, ShardSearchContextId contextId, SearchShardTarget target) { + DfsSearchResult result = new DfsSearchResult(contextId, target, null); result.setShardIndex(shardIndex); return result; } @@ -55,9 +54,9 @@ private static DfsSearchResult newSearchResult(int shardIndex, SearchContextId c public void testDfsWith2Shards() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, newSearchResult(0, new SearchContextId(UUIDs.randomBase64UUID(), 1), + results.set(0, newSearchResult(0, new ShardSearchContextId("", 1), new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); - results.set(1, newSearchResult(1, new SearchContextId(UUIDs.randomBase64UUID(), 2), + results.set(1, newSearchResult(1, new ShardSearchContextId("", 2), new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -67,16 +66,16 @@ public void testDfsWith2Shards() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.contextId().getId() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 123), - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else if (request.contextId().getId() == 2) { - 
QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 123), - new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), + new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); @@ -89,7 +88,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest }; MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - DfsQueryPhase phase = new DfsQueryPhase(results, searchPhaseController(), + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, searchPhaseController(), (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -115,10 +114,10 @@ public void run() throws IOException { public void testDfsWith1ShardFailed() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - final SearchContextId ctx1 = new SearchContextId(UUIDs.randomBase64UUID(), 1); - final SearchContextId ctx2 = new SearchContextId(UUIDs.randomBase64UUID(), 2); - results.set(0, newSearchResult(0, ctx1, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); - results.set(1, newSearchResult(1, ctx2, new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); + results.set(0, newSearchResult(0, new ShardSearchContextId("", 1), + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); + results.set(1, newSearchResult(1, new ShardSearchContextId("", 2), + new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -127,8 +126,9 @@ public void testDfsWith1ShardFailed() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.contextId().getId() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 123), - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), + new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs( new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); @@ -143,7 +143,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest }; MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - DfsQueryPhase phase = new DfsQueryPhase(results, searchPhaseController(), + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, searchPhaseController(), (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -164,7 +164,7 @@ public void run() 
throws IOException { assertEquals(1, mockSearchPhaseContext.failures.size()); assertTrue(mockSearchPhaseContext.failures.get(0).getCause() instanceof MockDirectoryWrapper.FakeIOException); assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx2)); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(new ShardSearchContextId("", 2L))); assertNull(responseRef.get().get(1)); } @@ -172,9 +172,9 @@ public void run() throws IOException { public void testFailPhaseOnException() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, newSearchResult(0, new SearchContextId(UUIDs.randomBase64UUID(), 1), + results.set(0, newSearchResult(0, new ShardSearchContextId("", 1), new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); - results.set(1, newSearchResult(1, new SearchContextId(UUIDs.randomBase64UUID(), 2), + results.set(1, newSearchResult(1, new ShardSearchContextId("", 2), new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -184,8 +184,8 @@ public void testFailPhaseOnException() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.contextId().getId() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 123), - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); @@ -200,7 +200,7 @@ public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest }; MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - DfsQueryPhase phase = new DfsQueryPhase(results, searchPhaseController(), + DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, searchPhaseController(), (response) -> new SearchPhase("test") { @Override public void run() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 2efe819740357..ff336cc030941 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -36,7 +35,7 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import 
org.elasticsearch.search.fetch.QueryFetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; @@ -74,7 +73,7 @@ public void testShortcutQueryAndFetchOptimization() { numHits = 0; } - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { @@ -100,18 +99,18 @@ public void testFetchTwoDocument() { ArraySearchPhaseResults results = controller.newSearchPhaseResults(EsExecutors.newDirectExecutorService(), NOOP, mockSearchPhaseContext.getRequest(), 2, exc -> {}); int resultSetSize = randomIntBetween(2, 10); - final SearchContextId ctx1 = new SearchContextId(UUIDs.randomBase64UUID(), 123); - QuerySearchResult queryResult = new QuerySearchResult(ctx1, - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); + QuerySearchResult queryResult = new QuerySearchResult(ctx1, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult, () -> {}); - final SearchContextId ctx2 = new SearchContextId(UUIDs.randomBase64UUID(), 312); - queryResult = new QuerySearchResult(ctx2, - new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); + final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); + queryResult = new QuerySearchResult( + ctx2, new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -134,7 +133,7 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe listener.onResponse(fetchResult); } }; - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { @@ -161,18 +160,17 @@ public void testFailFetchOneDoc() { ArraySearchPhaseResults results = controller.newSearchPhaseResults(EsExecutors.newDirectExecutorService(), NOOP, mockSearchPhaseContext.getRequest(), 2, exc -> {}); int resultSetSize = randomIntBetween(2, 10); - SearchContextId ctx1 = new SearchContextId(UUIDs.randomBase64UUID(), 123); - QuerySearchResult queryResult = new QuerySearchResult(ctx1, - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + final ShardSearchContextId ctx = new ShardSearchContextId(UUIDs.base64UUID(), 123); + 
QuerySearchResult queryResult = new QuerySearchResult(ctx, + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult, () -> {}); - SearchContextId ctx2 = new SearchContextId(UUIDs.randomBase64UUID(), 321); - queryResult = new QuerySearchResult(ctx2, - new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); + queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), + new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -194,7 +192,7 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe } }; - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { @@ -213,7 +211,7 @@ public void run() { assertEquals(1, searchResponse.getShardFailures().length); assertTrue(searchResponse.getShardFailures()[0].getCause() instanceof MockDirectoryWrapper.FakeIOException); assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); - assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx1)); + assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(ctx)); } public void testFetchDocsConcurrently() throws InterruptedException { @@ -226,8 +224,8 @@ public void testFetchDocsConcurrently() throws InterruptedException { ArraySearchPhaseResults results = controller.newSearchPhaseResults(EsExecutors.newDirectExecutorService(), NOOP, mockSearchPhaseContext.getRequest(), numHits, exc -> {}); for (int i = 0; i < numHits; i++) { - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId("", i), - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(i+1, i)}), i), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set @@ -240,14 +238,14 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe SearchActionListener listener) { new Thread(() -> { FetchSearchResult fetchResult = new FetchSearchResult(); - fetchResult.hits(new SearchHits(new SearchHit[]{new SearchHit((int) (request.contextId().getId() + 1))}, + fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit((int) (request.contextId().getId()+1))}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 100F)); listener.onResponse(fetchResult); }).start(); } }; CountDownLatch latch = new CountDownLatch(1); - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, 
ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { @@ -284,16 +282,17 @@ public void testExceptionFailsPhase() { controller.newSearchPhaseResults(EsExecutors.newDirectExecutorService(), NOOP, mockSearchPhaseContext.getRequest(), 2, exc -> {}); int resultSetSize = randomIntBetween(2, 10); - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId("", 123), - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("", 123), + new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult, () -> {}); - queryResult = new QuerySearchResult(new SearchContextId("", 321), - new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); + queryResult = new QuerySearchResult(new ShardSearchContextId("", 321), + new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -312,14 +311,14 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(84)}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F)); } else { assertEquals(request.contextId().getId(), 123); fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(42)}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F)); } listener.onResponse(fetchResult); } }; - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { @@ -341,18 +340,18 @@ public void testCleanupIrrelevantContexts() { // contexts that are not fetched s ArraySearchPhaseResults results = controller.newSearchPhaseResults(EsExecutors.newDirectExecutorService(), NOOP, mockSearchPhaseContext.getRequest(), 2, exc -> {}); int resultSetSize = 1; - SearchContextId ctx1 = new SearchContextId(UUIDs.randomBase64UUID(), 123); + final ShardSearchContextId ctx1 = new ShardSearchContextId(UUIDs.base64UUID(), 123); QuerySearchResult queryResult = new QuerySearchResult(ctx1, - new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE)); + new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult, () -> {}); - SearchContextId ctx2 = new SearchContextId(UUIDs.randomBase64UUID(), 
321); + final ShardSearchContextId ctx2 = new ShardSearchContextId(UUIDs.base64UUID(), 321); queryResult = new QuerySearchResult(ctx2, - new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); + new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE), null); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -364,7 +363,7 @@ public void testCleanupIrrelevantContexts() { // contexts that are not fetched s public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRequest request, SearchTask task, SearchActionListener listener) { FetchSearchResult fetchResult = new FetchSearchResult(); - if (request.contextId().equals(ctx2)) { + if (request.contextId().getId() == 321) { fetchResult.hits(new SearchHits(new SearchHit[] {new SearchHit(84)}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F)); } else { @@ -373,7 +372,7 @@ public void sendExecuteFetch(Transport.Connection connection, ShardFetchSearchRe listener.onResponse(fetchResult); } }; - FetchSearchPhase phase = new FetchSearchPhase(results, controller, mockSearchPhaseContext, ClusterState.EMPTY_STATE, + FetchSearchPhase phase = new FetchSearchPhase(results, controller, null, mockSearchPhaseContext, (searchResponse, scrollId) -> new SearchPhase("test") { @Override public void run() { diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index cd060c971a4f4..5b145994dd738 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -20,11 +20,14 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; import org.junit.Assert; @@ -47,7 +50,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext { final AtomicInteger numSuccess; final List failures = Collections.synchronizedList(new ArrayList<>()); SearchTransportService searchTransport; - final Set releasedSearchContexts = new HashSet<>(); + final Set releasedSearchContexts = new HashSet<>(); final SearchRequest searchRequest = new SearchRequest(); final AtomicReference searchResponse = new AtomicReference<>(); @@ -83,9 +86,12 @@ public SearchRequest getRequest() { } @Override - public void sendSearchResponse(InternalSearchResponse internalSearchResponse, String scrollId) { + public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null; + String searchContextId = + getRequest().pointInTimeBuilder() != null ? 
TransportSearchHelper.buildScrollId(queryResults, Version.CURRENT) : null; searchResponse.set(new SearchResponse(internalSearchResponse, scrollId, numShards, numSuccess.get(), 0, 0, - failures.toArray(ShardSearchFailure.EMPTY_ARRAY), SearchResponse.Clusters.EMPTY)); + failures.toArray(ShardSearchFailure.EMPTY_ARRAY), SearchResponse.Clusters.EMPTY, searchContextId)); } @Override @@ -136,7 +142,7 @@ public void onFailure(Exception e) { } @Override - public void sendReleaseSearchContext(SearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { + public void sendReleaseSearchContext(ShardSearchContextId contextId, Transport.Connection connection, OriginalIndices originalIndices) { releasedSearchContexts.add(contextId); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java index d91a4eaf02288..fecf47bd29c7d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchResponseTests.java @@ -49,7 +49,7 @@ protected MultiSearchResponse createTestInstance() { SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, totalShards, - successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, clusters); + successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, clusters, null); items[i] = new MultiSearchResponse.Item(searchResponse, null); } return new MultiSearchResponse(items, randomNonNegativeLong()); @@ -68,7 +68,7 @@ private static MultiSearchResponse createTestInstanceWithFailures() { SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, totalShards, - successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, clusters); + successfulShards, skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, clusters, null); items[i] = new MultiSearchResponse.Item(searchResponse, null); } else { items[i] = new MultiSearchResponse.Item(null, new ElasticsearchException("an error")); @@ -81,7 +81,7 @@ private static MultiSearchResponse createTestInstanceWithFailures() { protected MultiSearchResponse doParseInstance(XContentParser parser) throws IOException { return MultiSearchResponse.fromXContext(parser); } - + @Override protected void assertEqualInstances(MultiSearchResponse expected, MultiSearchResponse actual) { assertThat(actual.getTook(), equalTo(expected.getTook())); @@ -106,7 +106,7 @@ protected boolean supportsUnknownFields() { protected Predicate getRandomFieldsExcludeFilterWhenResultHasErrors() { return field -> field.startsWith("responses"); - } + } /** * Test parsing {@link MultiSearchResponse} with inner failures as they don't support asserting on xcontent equivalence, given that @@ -123,6 +123,6 @@ public void testFromXContentWithFailures() throws IOException { AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, getRandomFieldsExcludeFilterWhenResultHasErrors(), this::createParser, this::doParseInstance, this::assertEqualInstances, 
assertToXContentEquivalence, ToXContent.EMPTY_PARAMS); - } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 4e32a7cb1ea40..f92902d631a1f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.search.internal.InternalSearchResponse; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; @@ -127,7 +127,7 @@ protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting sha new Thread(() -> { Transport.Connection connection = getConnection(null, shard.currentNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new SearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); listener.onResponse(testSearchPhaseResult); @@ -153,7 +153,7 @@ protected void executeNext(Runnable runnable, Thread originalThread) { asyncAction.start(); latch.await(); assertTrue(searchPhaseDidRun.get()); - SearchResponse searchResponse = asyncAction.buildSearchResponse(null, null, asyncAction.buildShardFailures()); + SearchResponse searchResponse = asyncAction.buildSearchResponse(null, asyncAction.buildShardFailures(), null, null); assertEquals(shardsIter.size() - numSkipped, numRequests.get()); assertEquals(0, searchResponse.getFailedShards()); assertEquals(numSkipped, searchResponse.getSkippedShards()); @@ -238,7 +238,7 @@ protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting sha } Transport.Connection connection = getConnection(null, shard.currentNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new SearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); if (shardFailures[shard.shardId().id()]) { listener.onFailure(new RuntimeException()); } else { @@ -282,7 +282,7 @@ public void testFanOutAndCollect() throws InterruptedException { DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); - Map> nodeToContextMap = newConcurrentMap(); + Map> nodeToContextMap = newConcurrentMap(); AtomicInteger contextIdGenerator = new AtomicInteger(0); int numShards = randomIntBetween(1, 10); GroupShardsIterator shardsIter = getShardsIter("idx", @@ -291,7 +291,7 @@ public void testFanOutAndCollect() throws InterruptedException { AtomicInteger numFreedContext = new AtomicInteger(); SearchTransportService transportService = new SearchTransportService(null, null) { @Override - public void sendFreeContext(Transport.Connection connection, SearchContextId contextId, OriginalIndices originalIndices) { + public void sendFreeContext(Transport.Connection connection, 
ShardSearchContextId contextId, OriginalIndices originalIndices) { numFreedContext.incrementAndGet(); assertTrue(nodeToContextMap.containsKey(connection.getNode())); assertTrue(nodeToContextMap.get(connection.getNode()).remove(contextId)); @@ -332,8 +332,8 @@ protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting sha assertTrue("shard: " + shard.shardId() + " has been queried twice", response.queried.add(shard.shardId())); Transport.Connection connection = getConnection(null, shard.currentNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new SearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); - Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); + Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); ids.add(testSearchPhaseResult.getContextId()); if (randomBoolean()) { listener.onResponse(testSearchPhaseResult); @@ -392,7 +392,7 @@ public void testFanOutAndFail() throws InterruptedException { DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); - Map> nodeToContextMap = newConcurrentMap(); + Map> nodeToContextMap = newConcurrentMap(); AtomicInteger contextIdGenerator = new AtomicInteger(0); int numShards = randomIntBetween(2, 10); GroupShardsIterator shardsIter = getShardsIter("idx", @@ -401,7 +401,7 @@ public void testFanOutAndFail() throws InterruptedException { AtomicInteger numFreedContext = new AtomicInteger(); SearchTransportService transportService = new SearchTransportService(null, null) { @Override - public void sendFreeContext(Transport.Connection connection, SearchContextId contextId, OriginalIndices originalIndices) { + public void sendFreeContext(Transport.Connection connection, ShardSearchContextId contextId, OriginalIndices originalIndices) { assertNotNull(contextId); numFreedContext.incrementAndGet(); assertTrue(nodeToContextMap.containsKey(connection.getNode())); @@ -446,9 +446,9 @@ protected void executePhaseOnShard(SearchShardIterator shardIt, if (shard.shardId().id() == 0) { testSearchPhaseResult = new TestSearchPhaseResult(null, connection.getNode()); } else { - testSearchPhaseResult = new TestSearchPhaseResult(new SearchContextId(UUIDs.randomBase64UUID(), + testSearchPhaseResult = new TestSearchPhaseResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); - Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); + Set ids = nodeToContextMap.computeIfAbsent(connection.getNode(), (n) -> newConcurrentSet()); ids.add(testSearchPhaseResult.getContextId()); } if (randomBoolean()) { @@ -547,7 +547,7 @@ protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting sha new Thread(() -> { Transport.Connection connection = getConnection(null, shard.currentNodeId()); TestSearchPhaseResult testSearchPhaseResult = new TestSearchPhaseResult( - new SearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); + new ShardSearchContextId(UUIDs.randomBase64UUID(), contextIdGenerator.incrementAndGet()), connection.getNode()); if (shardIt.remaining() > 0) { 
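
testFanOutAndCollect and testFanOutAndFail above maintain a nodeToContextMap so the test can assert that every context opened on a node is eventually freed and nothing leaks. The ledger below is a self-contained restatement of that invariant; the names are mine, not the test's.

```
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class OpenContextLedgerDemo {
    // node id -> context ids currently open on that node
    private final Map<String, Set<Long>> open = new ConcurrentHashMap<>();

    void opened(String nodeId, long contextId) {
        open.computeIfAbsent(nodeId, n -> ConcurrentHashMap.newKeySet()).add(contextId);
    }

    void freed(String nodeId, long contextId) {
        Set<Long> ids = open.get(nodeId);
        if (ids == null || !ids.remove(contextId)) {
            throw new IllegalStateException("freed a context that was never opened");
        }
    }

    boolean allFreed() {
        return open.values().stream().allMatch(Set::isEmpty);
    }

    public static void main(String[] args) {
        OpenContextLedgerDemo ledger = new OpenContextLedgerDemo();
        ledger.opened("node_1", 1L);
        ledger.opened("node_2", 12L);
        ledger.freed("node_1", 1L);
        ledger.freed("node_2", 12L);
        System.out.println(ledger.allFreed()); // true: no leaked search contexts
    }
}
```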
numFailReplicas.incrementAndGet(); listener.onFailure(new RuntimeException()); @@ -619,13 +619,13 @@ public static class TestSearchResponse extends SearchResponse { final Set<ShardId> queried = new HashSet<>(); TestSearchResponse() { - super(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, Clusters.EMPTY); + super(InternalSearchResponse.empty(), null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, Clusters.EMPTY, null); } } public static class TestSearchPhaseResult extends SearchPhaseResult { final DiscoveryNode node; - TestSearchPhaseResult(SearchContextId contextId, DiscoveryNode node) { + TestSearchPhaseResult(ShardSearchContextId contextId, DiscoveryNode node) { this.contextId = contextId; this.node = node; } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java new file mode 100644 index 0000000000000..00d65f34ed11e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -0,0 +1,98 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; + +public class SearchContextIdTests extends ESTestCase { + + QueryBuilder randomQueryBuilder() { + if (randomBoolean()) { + return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } else if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); + } + } + + public void testEncode() { + final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(List.of( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + )); + final AtomicArray<SearchPhaseResult> queryResults = TransportSearchHelperTests.generateQueryResults(); + final Version version = Version.CURRENT; + final Map<String, AliasFilter> aliasFilters = new HashMap<>(); + for (SearchPhaseResult result : queryResults.asList()) { + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if (randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } + } + final String id = SearchContextId.encode(queryResults.asList(), aliasFilters, version); + final SearchContextId context = SearchContextId.decode(namedWriteableRegistry, id); + assertThat(context.shards().keySet(), hasSize(3)); + assertThat(context.aliasFilter(), equalTo(aliasFilters)); + SearchContextIdForNode node1 = context.shards().get(new ShardId("idx", "uuid1", 2)); + assertThat(node1.getClusterAlias(), equalTo("cluster_x")); + assertThat(node1.getNode(), equalTo("node_1")); + assertThat(node1.getSearchContextId().getId(), equalTo(1L)); + assertThat(node1.getSearchContextId().getReaderId(), equalTo("a")); + + SearchContextIdForNode node2 = context.shards().get(new ShardId("idy", "uuid2", 42)); + assertThat(node2.getClusterAlias(), equalTo("cluster_y")); + assertThat(node2.getNode(), equalTo("node_2")); + assertThat(node2.getSearchContextId().getId(), equalTo(12L)); + assertThat(node2.getSearchContextId().getReaderId(), equalTo("b")); + + SearchContextIdForNode node3 = context.shards().get(new ShardId("idy", "uuid2", 43)); + assertThat(node3.getClusterAlias(), nullValue()); + assertThat(node3.getNode(), equalTo("node_3")); + assertThat(node3.getSearchContextId().getId(), equalTo(42L)); + assertThat(node3.getSearchContextId().getReaderId(),
equalTo("c")); + } +} diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index 6ebd5f60135a6..b56de3fc7c0c7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -63,7 +63,7 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.SortBy; import org.elasticsearch.search.suggest.Suggest; @@ -294,7 +294,7 @@ private static AtomicArray generateQueryResults(int nShards, String clusterAlias = randomBoolean() ? null : "remote"; SearchShardTarget searchShardTarget = new SearchShardTarget("", new ShardId("", "", shardIndex), clusterAlias, OriginalIndices.NONE); - QuerySearchResult querySearchResult = new QuerySearchResult(new SearchContextId("", shardIndex), searchShardTarget); + QuerySearchResult querySearchResult = new QuerySearchResult(new ShardSearchContextId("", shardIndex), searchShardTarget, null); final TopDocs topDocs; float maxScore = 0; if (searchHitsSize == 0) { @@ -366,7 +366,7 @@ private static AtomicArray generateFetchResults(int nShards, float maxScore = -1F; String clusterAlias = randomBoolean() ? null : "remote"; SearchShardTarget shardTarget = new SearchShardTarget("", new ShardId("", "", shardIndex), clusterAlias, OriginalIndices.NONE); - FetchSearchResult fetchSearchResult = new FetchSearchResult(new SearchContextId("", shardIndex), shardTarget); + FetchSearchResult fetchSearchResult = new FetchSearchResult(new ShardSearchContextId("", shardIndex), shardTarget); List searchHits = new ArrayList<>(); for (ScoreDoc scoreDoc : mergedSearchDocs) { if (scoreDoc.shardIndex == shardIndex) { @@ -431,8 +431,8 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { numEmptyResponses --; } - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 0), - new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", 0), + new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); InternalAggregations aggs = InternalAggregations.from(singletonList(new InternalMax("test", 1.0D, DocValueFormat.RAW, emptyMap()))); @@ -440,8 +440,8 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { result.setShardIndex(0); consumer.consumeResult(result, latch::countDown); - result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 1), - new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE)); + result = new QuerySearchResult(new ShardSearchContextId("", 1), + new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); 
aggs = InternalAggregations.from(singletonList(new InternalMax("test", 3.0D, DocValueFormat.RAW, emptyMap()))); @@ -449,8 +449,8 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { result.setShardIndex(2); consumer.consumeResult(result, latch::countDown); - result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), 1), - new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE)); + result = new QuerySearchResult(new ShardSearchContextId("", 1), + new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); aggs = InternalAggregations.from(singletonList(new InternalMax("test", 2.0D, DocValueFormat.RAW, emptyMap()))); @@ -514,8 +514,8 @@ public void testConsumerConcurrently() throws Exception { threads[i] = new Thread(() -> { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), id), - new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", id), + new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]); @@ -561,8 +561,8 @@ public void testConsumerOnlyAggs() throws Exception { for (int i = 0; i < expectedNumResults; i++) { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number), new DocValueFormat[0]); InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(new InternalMax("test", (double) number, @@ -602,8 +602,8 @@ public void testConsumerOnlyHits() throws Exception { for (int i = 0; i < expectedNumResults; i++) { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]); result.setShardIndex(i); @@ -643,8 +643,8 @@ public void testReduceTopNWithFromOffset() throws Exception { int score = 100; CountDownLatch latch = new CountDownLatch(4); for (int i = 0; i < 4; i++) { - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new 
ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); ScoreDoc[] docs = new ScoreDoc[3]; for (int j = 0; j < docs.length; j++) { docs[j] = new ScoreDoc(0, score--); @@ -687,8 +687,8 @@ public void testConsumerSortByField() throws Exception { max.updateAndGet(prev -> Math.max(prev, number)); FieldDoc[] fieldDocs = {new FieldDoc(0, Float.NaN, new Object[]{number})}; TopDocs topDocs = new TopFieldDocs(new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); result.setShardIndex(i); result.size(size); @@ -727,8 +727,8 @@ public void testConsumerFieldCollapsing() throws Exception { Object[] values = {randomFrom(collapseValues)}; FieldDoc[] fieldDocs = {new FieldDoc(0, Float.NaN, values)}; TopDocs topDocs = new CollapseTopFieldDocs("field", new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields, values); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); result.setShardIndex(i); result.size(size); @@ -762,8 +762,8 @@ public void testConsumerSuggestions() throws Exception { int maxScoreCompletion = -1; CountDownLatch latch = new CountDownLatch(expectedNumResults); for (int i = 0; i < expectedNumResults; i++) { - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), i), - new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), + new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null); List>> suggestions = new ArrayList<>(); { @@ -891,8 +891,8 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna threads[i] = new Thread(() -> { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(new SearchContextId(UUIDs.randomBase64UUID(), id), - new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", id), + new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE), null); result.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[]{new ScoreDoc(0, number)}), number), new DocValueFormat[0]); @@ -947,8 +947,9 @@ public void testPartialMergeFailure() throws InterruptedException { for (int i = 0; i < expectedNumResults; i++) { final int index = i; threads[index] = new Thread(() -> { - QuerySearchResult result = new QuerySearchResult(new 
SearchContextId(UUIDs.randomBase64UUID(), index), - new SearchShardTarget("node", new ShardId("a", "b", index), null, OriginalIndices.NONE)); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), index), + new SearchShardTarget("node", new ShardId("a", "b", index), null, OriginalIndices.NONE), + null); result.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), new DocValueFormat[0]); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index cc48a60a72512..90555c819bca0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -39,7 +39,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.sort.SortBuilders; @@ -98,8 +98,8 @@ public void sendExecuteQuery(Transport.Connection connection, ShardSearchRequest assertNotEquals(shardId, (int) request.getBottomSortValues().getFormattedSortValues()[0]); numWithTopDocs.incrementAndGet(); } - QuerySearchResult queryResult = new QuerySearchResult(new SearchContextId("N/A", 123), - new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null, OriginalIndices.NONE)); + QuerySearchResult queryResult = new QuerySearchResult(new ShardSearchContextId("N/A", 123), + new SearchShardTarget("node1", new ShardId("idx", "na", shardId), null, OriginalIndices.NONE), null); SortField sortField = new SortField("timestamp", SortField.Type.LONG); if (withCollapse) { queryResult.topDocs(new TopDocsAndMaxScore( diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 5ff66a0709701..d7f2bbe69bb05 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -164,6 +164,39 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("using [rescore] is not allowed in a scroll context", validationErrors.validationErrors().get(0)); } + { + // Reader context with scroll + SearchRequest searchRequest = new SearchRequest() + .source(new SearchSourceBuilder().pointInTimeBuilder( + new SearchSourceBuilder.PointInTimeBuilder("id", TimeValue.timeValueMillis(randomIntBetween(1, 10))))) + .scroll(TimeValue.timeValueMillis(randomIntBetween(1, 100))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("using [point in time] is not allowed in a scroll context", validationErrors.validationErrors().get(0)); + } + { + // Reader context with preference + SearchRequest searchRequest = new SearchRequest() + .source(new SearchSourceBuilder(). 
+ pointInTimeBuilder(new SearchSourceBuilder.PointInTimeBuilder("id", TimeValue.timeValueMillis(between(1, 10))))) + .preference("test"); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[preference] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } + { + // Reader context with routing + SearchRequest searchRequest = new SearchRequest() + .source(new SearchSourceBuilder() + .pointInTimeBuilder(new SearchSourceBuilder.PointInTimeBuilder("id", TimeValue.timeValueMillis(between(1, 10))))) + .routing("test"); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[routing] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } } public void testCopyConstructor() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java index 49eb7673592bf..0ca8c31037583 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java @@ -30,7 +30,7 @@ import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.Transport; @@ -50,11 +50,11 @@ public class SearchScrollAsyncActionTests extends ESTestCase { public void testSendRequestsToNodes() throws InterruptedException { ParsedScrollId scrollId = getParsedScrollId( - new ScrollIdForNode(null, "node1", new SearchContextId(UUIDs.randomBase64UUID(), 1)), - new ScrollIdForNode(null, "node2", new SearchContextId(UUIDs.randomBase64UUID(), 2)), - new ScrollIdForNode(null, "node3", new SearchContextId(UUIDs.randomBase64UUID(), 17)), - new ScrollIdForNode(null, "node1", new SearchContextId(UUIDs.randomBase64UUID(), 0)), - new ScrollIdForNode(null, "node3", new SearchContextId(UUIDs.randomBase64UUID(), 0))); + new SearchContextIdForNode(null, "node1", new ShardSearchContextId(UUIDs.randomBase64UUID(), 1)), + new SearchContextIdForNode(null, "node2", new ShardSearchContextId(UUIDs.randomBase64UUID(), 2)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId(UUIDs.randomBase64UUID(), 17)), + new SearchContextIdForNode(null, "node1", new ShardSearchContextId(UUIDs.randomBase64UUID(), 0)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId(UUIDs.randomBase64UUID(), 0))); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)) @@ -108,10 +108,10 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch latch.await(); ShardSearchFailure[] shardSearchFailures = action.buildShardFailures(); assertEquals(0, shardSearchFailures.length); - ScrollIdForNode[] context = scrollId.getContext(); + SearchContextIdForNode[] context = scrollId.getContext(); 
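// ScrollIdForNode has been renamed to SearchContextIdForNode, and getContextId() to getSearchContextId(), since scroll ids and point-in-time ids now share the same per-node context entry.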
for (int i = 0; i < results.length(); i++) { assertNotNull(results.get(i)); - assertEquals(context[i].getContextId(), results.get(i).getContextId()); + assertEquals(context[i].getSearchContextId(), results.get(i).getContextId()); assertEquals(context[i].getNode(), results.get(i).node.getId()); } } @@ -119,11 +119,11 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch public void testFailNextPhase() throws InterruptedException { ParsedScrollId scrollId = getParsedScrollId( - new ScrollIdForNode(null, "node1", new SearchContextId("", 1)), - new ScrollIdForNode(null, "node2", new SearchContextId("a", 2)), - new ScrollIdForNode(null, "node3", new SearchContextId("b", 17)), - new ScrollIdForNode(null, "node1", new SearchContextId("c", 0)), - new ScrollIdForNode(null, "node3", new SearchContextId("d", 0))); + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 1)), + new SearchContextIdForNode(null, "node2", new ShardSearchContextId("a", 2)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("b", 17)), + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("c", 0)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("d", 0))); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)) @@ -199,21 +199,21 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch latch.await(); ShardSearchFailure[] shardSearchFailures = action.buildShardFailures(); assertEquals(0, shardSearchFailures.length); - ScrollIdForNode[] context = scrollId.getContext(); + SearchContextIdForNode[] context = scrollId.getContext(); for (int i = 0; i < results.length(); i++) { assertNotNull(results.get(i)); - assertEquals(context[i].getContextId(), results.get(i).getContextId()); + assertEquals(context[i].getSearchContextId(), results.get(i).getContextId()); assertEquals(context[i].getNode(), results.get(i).node.getId()); } } public void testNodeNotAvailable() throws InterruptedException { ParsedScrollId scrollId = getParsedScrollId( - new ScrollIdForNode(null, "node1", new SearchContextId("", 1)), - new ScrollIdForNode(null, "node2", new SearchContextId("", 2)), - new ScrollIdForNode(null, "node3", new SearchContextId("", 17)), - new ScrollIdForNode(null, "node1", new SearchContextId("", 0)), - new ScrollIdForNode(null, "node3", new SearchContextId("", 0))); + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 1)), + new SearchContextIdForNode(null, "node2", new ShardSearchContextId("", 2)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("", 17)), + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 0)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("", 0))); // node2 is not available DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) @@ -275,13 +275,13 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch // .reason() returns the full stack trace assertThat(shardSearchFailures[0].reason(), startsWith("java.lang.IllegalStateException: node [node2] is not available")); - ScrollIdForNode[] context = scrollId.getContext(); + SearchContextIdForNode[] context = scrollId.getContext(); for (int i = 0; i < results.length(); i++) { if 
(context[i].getNode().equals("node2")) { assertNull(results.get(i)); } else { assertNotNull(results.get(i)); - assertEquals(context[i].getContextId(), results.get(i).getContextId()); + assertEquals(context[i].getSearchContextId(), results.get(i).getContextId()); assertEquals(context[i].getNode(), results.get(i).node.getId()); } } @@ -289,11 +289,11 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch public void testShardFailures() throws InterruptedException { ParsedScrollId scrollId = getParsedScrollId( - new ScrollIdForNode(null, "node1", new SearchContextId("", 1)), - new ScrollIdForNode(null, "node2", new SearchContextId("", 2)), - new ScrollIdForNode(null, "node3", new SearchContextId("",17)), - new ScrollIdForNode(null, "node1", new SearchContextId("", 0)), - new ScrollIdForNode(null, "node3", new SearchContextId("", 0))); + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 1)), + new SearchContextIdForNode(null, "node2", new ShardSearchContextId("", 2)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("",17)), + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 0)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("", 0))); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)) @@ -353,13 +353,13 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch assertEquals(1, shardSearchFailures.length); assertThat(shardSearchFailures[0].reason(), containsString("IllegalArgumentException: BOOM on shard")); - ScrollIdForNode[] context = scrollId.getContext(); + SearchContextIdForNode[] context = scrollId.getContext(); for (int i = 0; i < results.length(); i++) { - if (context[i].getContextId().getId() == 17) { + if (context[i].getSearchContextId().getId() == 17) { assertNull(results.get(i)); } else { assertNotNull(results.get(i)); - assertEquals(context[i].getContextId(), results.get(i).getContextId()); + assertEquals(context[i].getSearchContextId(), results.get(i).getContextId()); assertEquals(context[i].getNode(), results.get(i).node.getId()); } } @@ -367,11 +367,11 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch public void testAllShardsFailed() throws InterruptedException { ParsedScrollId scrollId = getParsedScrollId( - new ScrollIdForNode(null, "node1", new SearchContextId("", 1)), - new ScrollIdForNode(null, "node2", new SearchContextId("", 2)), - new ScrollIdForNode(null, "node3", new SearchContextId("", 17)), - new ScrollIdForNode(null, "node1", new SearchContextId("", 0)), - new ScrollIdForNode(null, "node3", new SearchContextId("", 0))); + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 1)), + new SearchContextIdForNode(null, "node2", new ShardSearchContextId("", 2)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("", 17)), + new SearchContextIdForNode(null, "node1", new ShardSearchContextId("", 0)), + new SearchContextIdForNode(null, "node3", new ShardSearchContextId("", 0))); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) .add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)) @@ -433,7 +433,7 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch 
action.run(); latch.await(); - ScrollIdForNode[] context = scrollId.getContext(); + SearchContextIdForNode[] context = scrollId.getContext(); ShardSearchFailure[] shardSearchFailures = action.buildShardFailures(); assertEquals(context.length, shardSearchFailures.length); @@ -444,10 +444,10 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch } } - private static ParsedScrollId getParsedScrollId(ScrollIdForNode... idsForNodes) { - List<ScrollIdForNode> scrollIdForNodes = Arrays.asList(idsForNodes); - Collections.shuffle(scrollIdForNodes, random()); - return new ParsedScrollId("", "test", scrollIdForNodes.toArray(new ScrollIdForNode[0])); + private static ParsedScrollId getParsedScrollId(SearchContextIdForNode... idsForNodes) { + List<SearchContextIdForNode> searchContextIdForNodes = Arrays.asList(idsForNodes); + Collections.shuffle(searchContextIdForNodes, random()); + return new ParsedScrollId("", "test", searchContextIdForNodes.toArray(new SearchContextIdForNode[0])); } private ActionListener<SearchResponse> dummyListener() { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java index f661cf8e7e8a7..389d8933a23dd 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchScrollRequestTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -60,7 +60,7 @@ public void testSerialization() throws Exception { public void testInternalScrollSearchRequestSerialization() throws IOException { SearchScrollRequest searchScrollRequest = createSearchScrollRequest(); InternalScrollSearchRequest internalScrollSearchRequest = - new InternalScrollSearchRequest(searchScrollRequest, new SearchContextId(UUIDs.randomBase64UUID(), randomLong())); + new InternalScrollSearchRequest(searchScrollRequest, new ShardSearchContextId(UUIDs.randomBase64UUID(), randomLong())); try (BytesStreamOutput output = new BytesStreamOutput()) { internalScrollSearchRequest.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index b2ced205eda72..cd7eb980892cf 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -36,8 +36,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.GroupShardsIteratorTests; -import org.elasticsearch.cluster.routing.PlainShardIterator; -import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; @@ -144,7 +142,7 @@ public void testMergeShardsIterators() { List<SearchShardIterator> expected = new ArrayList<>(); String localClusterAlias = randomAlphaOfLengthBetween(5, 10);
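// mergeShardsIterators now accepts plain lists of SearchShardIterator for both local and remote shards, so the test below no longer wraps local iterators in PlainShardIterator or pre-builds a GroupShardsIterator.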
OriginalIndices localIndices = OriginalIndicesTests.randomOriginalIndices(); - List<ShardIterator> localShardIterators = new ArrayList<>(); + List<SearchShardIterator> localShardIterators = new ArrayList<>(); List<SearchShardIterator> remoteShardIterators = new ArrayList<>(); int numShards = randomIntBetween(0, 10); for (int i = 0; i < numShards; i++) { @@ -154,7 +152,7 @@ boolean localIndex = randomBoolean(); if (localIndex) { SearchShardIterator localIterator = createSearchShardIterator(i, index, localIndices, localClusterAlias); - localShardIterators.add(new PlainShardIterator(localIterator.shardId(), localIterator.getShardRoutings())); + localShardIterators.add(localIterator); if (rarely()) { String remoteClusterAlias = randomFrom(remoteClusters); //simulate scenario where the local cluster is also registered as a remote one @@ -191,11 +189,12 @@ } } + Collections.shuffle(localShardIterators, random()); Collections.shuffle(remoteShardIterators, random()); - GroupShardsIterator<SearchShardIterator> groupShardsIterator = TransportSearchAction.mergeShardsIterators( - new GroupShardsIterator<>(localShardIterators), localIndices, localClusterAlias, remoteShardIterators); + GroupShardsIterator<SearchShardIterator> groupShardsIterator = + TransportSearchAction.mergeShardsIterators(localShardIterators, remoteShardIterators); List<SearchShardIterator> result = new ArrayList<>(); for (SearchShardIterator searchShardIterator : groupShardsIterator) { result.add(searchShardIterator); @@ -367,7 +366,7 @@ private MockTransportService[] startTransport(int numClusters, DiscoveryNode[] n private static SearchResponse emptySearchResponse() { InternalSearchResponse response = new InternalSearchResponse(new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, null, false, null, 1); - return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY, null); } public void testCCSRemoteReduceMergeFails() throws Exception { @@ -846,10 +845,9 @@ public void testShouldMinimizeRoundtrips() throws Exception { public void testShouldPreFilterSearchShards() { int numIndices = randomIntBetween(2, 10); - Index[] indices = new Index[numIndices]; + String[] indices = new String[numIndices]; for (int i = 0; i < numIndices; i++) { - String indexName = randomAlphaOfLengthBetween(5, 10); - indices[i] = new Index(indexName, indexName + "-uuid"); + indices[i] = randomAlphaOfLengthBetween(5, 10); } ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).build(); { @@ -889,16 +887,15 @@ public void testShouldPreFilterSearchShards() { public void testShouldPreFilterSearchShardsWithReadOnly() { int numIndices = randomIntBetween(2, 10); int numReadOnly = randomIntBetween(1, numIndices); - Index[] indices = new Index[numIndices]; + String[] indices = new String[numIndices]; ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder(); for (int i = 0; i < numIndices; i++) { - String indexName = randomAlphaOfLengthBetween(5, 10); - indices[i] = new Index(indexName, indexName + "-uuid"); + indices[i] = randomAlphaOfLengthBetween(5, 10); if (--numReadOnly >= 0) { if (randomBoolean()) { - blocksBuilder.addIndexBlock(indexName, IndexMetadata.INDEX_WRITE_BLOCK); + blocksBuilder.addIndexBlock(indices[i], IndexMetadata.INDEX_WRITE_BLOCK); } else { - blocksBuilder.addIndexBlock(indexName, IndexMetadata.INDEX_READ_ONLY_BLOCK);
+ blocksBuilder.addIndexBlock(indices[i], IndexMetadata.INDEX_READ_ONLY_BLOCK); } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java index c9d58c7dc9090..a4ae931d925b5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchHelperTests.java @@ -24,62 +24,66 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; +import org.elasticsearch.test.VersionUtils; import static org.hamcrest.Matchers.equalTo; public class TransportSearchHelperTests extends ESTestCase { - public void testParseScrollId() throws IOException { + public static AtomicArray<SearchPhaseResult> generateQueryResults() { AtomicArray<SearchPhaseResult> array = new AtomicArray<>(3); DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId("x", 1), node1); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId("a", 1), node1); testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), "cluster_x", null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId("y", 12), node2); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId("b", 12), node2); testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), "cluster_y", null)); SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = - new SearchAsyncActionTests.TestSearchPhaseResult(new SearchContextId("z", 42), node3); + new SearchAsyncActionTests.TestSearchPhaseResult(new ShardSearchContextId("c", 42), node3); testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); array.setOnce(0, testSearchPhaseResult1); array.setOnce(1, testSearchPhaseResult2); array.setOnce(2, testSearchPhaseResult3); + return array; + } - boolean includeUUID = randomBoolean(); - String scrollId = TransportSearchHelper.buildScrollId(array, includeUUID); + public void testParseScrollId() { + final Version version = VersionUtils.randomVersion(random()); + boolean includeUUID = version.onOrAfter(Version.V_7_7_0); + final AtomicArray<SearchPhaseResult> queryResults = generateQueryResults(); + String scrollId = TransportSearchHelper.buildScrollId(queryResults, version); ParsedScrollId parseScrollId = TransportSearchHelper.parseScrollId(scrollId); assertEquals(3, parseScrollId.getContext().length); assertEquals("node_1", parseScrollId.getContext()[0].getNode()); assertEquals("cluster_x", parseScrollId.getContext()[0].getClusterAlias()); - assertEquals(1, parseScrollId.getContext()[0].getContextId().getId()); + assertEquals(1,
parseScrollId.getContext()[0].getSearchContextId().getId()); if (includeUUID) { - assertThat(parseScrollId.getContext()[0].getContextId().getReaderId(), equalTo("x")); + assertThat(parseScrollId.getContext()[0].getSearchContextId().getReaderId(), equalTo("a")); } else { - assertThat(parseScrollId.getContext()[0].getContextId().getReaderId(), equalTo("")); + assertThat(parseScrollId.getContext()[0].getSearchContextId().getReaderId(), equalTo("")); } assertEquals("node_2", parseScrollId.getContext()[1].getNode()); assertEquals("cluster_y", parseScrollId.getContext()[1].getClusterAlias()); - assertEquals(12, parseScrollId.getContext()[1].getContextId().getId()); + assertEquals(12, parseScrollId.getContext()[1].getSearchContextId().getId()); if (includeUUID) { - assertThat(parseScrollId.getContext()[1].getContextId().getReaderId(), equalTo("y")); + assertThat(parseScrollId.getContext()[1].getSearchContextId().getReaderId(), equalTo("b")); } else { - assertThat(parseScrollId.getContext()[1].getContextId().getReaderId(), equalTo("")); + assertThat(parseScrollId.getContext()[1].getSearchContextId().getReaderId(), equalTo("")); } assertEquals("node_3", parseScrollId.getContext()[2].getNode()); assertNull(parseScrollId.getContext()[2].getClusterAlias()); - assertEquals(42, parseScrollId.getContext()[2].getContextId().getId()); + assertEquals(42, parseScrollId.getContext()[2].getSearchContextId().getId()); if (includeUUID) { - assertThat(parseScrollId.getContext()[2].getContextId().getReaderId(), equalTo("z")); + assertThat(parseScrollId.getContext()[2].getSearchContextId().getReaderId(), equalTo("c")); } else { - assertThat(parseScrollId.getContext()[2].getContextId().getReaderId(), equalTo("")); + assertThat(parseScrollId.getContext()[2].getSearchContextId().getReaderId(), equalTo("")); } } } diff --git a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index 2a103385ebd60..3d09600ab96dc 100644 --- a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -26,12 +26,14 @@ import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.client.Client; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import java.util.Collections; import java.util.HashMap; +import java.util.List; public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { @@ -43,7 +45,7 @@ protected Client buildClient(Settings headersSettings, ActionType[] testedAction TaskManager taskManager = new TaskManager(settings, threadPool, Collections.emptySet()); Actions actions = new Actions(testedActions, taskManager); NodeClient client = new NodeClient(settings, threadPool); - client.initialize(actions, taskManager, () -> "test", null); + client.initialize(actions, taskManager, () -> "test", null, new NamedWriteableRegistry(List.of())); return client; } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 9447ac6e43342..c2a8425134cf3 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -84,11 +84,10 @@ import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.plugins.IndexStorePlugin; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.engine.MockEngineFactory; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -288,9 +287,8 @@ public void testAddSearchOperationListener() throws IOException { IndexModule module = createIndexModule(indexSettings, emptyAnalysisRegistry); AtomicBoolean executed = new AtomicBoolean(false); SearchOperationListener listener = new SearchOperationListener() { - @Override - public void onNewContext(SearchContext context) { + public void onNewReaderContext(ReaderContext readerContext) { executed.set(true); } }; @@ -303,9 +301,8 @@ public void onNewContext(SearchContext context) { assertEquals(2, indexService.getSearchOperationListener().size()); assertEquals(SearchSlowLog.class, indexService.getSearchOperationListener().get(0).getClass()); assertSame(listener, indexService.getSearchOperationListener().get(1)); - for (SearchOperationListener l : indexService.getSearchOperationListener()) { - l.onNewContext(new TestSearchContext(null)); + l.onNewReaderContext(mock(ReaderContext.class)); } assertTrue(executed.get()); indexService.close("simon says", false); diff --git a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java index 28bab8da0fdfb..edca5df7e4d5d 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/SearchOperationListenerTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; @@ -33,6 +34,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; public class SearchOperationListenerTests extends ESTestCase { @@ -90,32 +92,32 @@ public void onFetchPhase(SearchContext searchContext, long tookInNanos) { } @Override - public void onNewContext(SearchContext context) { - assertNotNull(context); + public void onNewReaderContext(ReaderContext readerContext) { + assertNotNull(readerContext); newContext.incrementAndGet(); } @Override - public void onFreeContext(SearchContext context) { - assertNotNull(context); + public void onFreeReaderContext(ReaderContext readerContext) { + assertNotNull(readerContext); freeContext.incrementAndGet(); } @Override - public void onNewScrollContext(SearchContext context) { - assertNotNull(context); + public void onNewScrollContext(ReaderContext readerContext) { + assertNotNull(readerContext); newScrollContext.incrementAndGet(); } @Override - public void onFreeScrollContext(SearchContext context) { - assertNotNull(context); + public void onFreeScrollContext(ReaderContext readerContext) { + assertNotNull(readerContext); 
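// The SearchOperationListener context callbacks now receive the long-lived ReaderContext instead of a per-request SearchContext, which is why these tests can drive them with a plain mock(ReaderContext.class).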
freeScrollContext.incrementAndGet(); } @Override - public void validateSearchContext(SearchContext context, TransportRequest request) { - assertNotNull(context); + public void validateSearchContext(ReaderContext readerContext, TransportRequest request) { + assertNotNull(readerContext); validateSearchContext.incrementAndGet(); } }; @@ -216,7 +218,7 @@ public void validateSearchContext(SearchContext context, TransportRequest reques assertEquals(0, freeScrollContext.get()); assertEquals(0, validateSearchContext.get()); - compositeListener.onNewContext(ctx); + compositeListener.onNewReaderContext(mock(ReaderContext.class)); assertEquals(2, preFetch.get()); assertEquals(2, preQuery.get()); assertEquals(2, failedFetch.get()); @@ -229,7 +231,7 @@ public void validateSearchContext(SearchContext context, TransportRequest reques assertEquals(0, freeScrollContext.get()); assertEquals(0, validateSearchContext.get()); - compositeListener.onNewScrollContext(ctx); + compositeListener.onNewScrollContext(mock(ReaderContext.class)); assertEquals(2, preFetch.get()); assertEquals(2, preQuery.get()); assertEquals(2, failedFetch.get()); @@ -242,7 +244,7 @@ public void validateSearchContext(SearchContext context, TransportRequest reques assertEquals(0, freeScrollContext.get()); assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeContext(ctx); + compositeListener.onFreeReaderContext(mock(ReaderContext.class)); assertEquals(2, preFetch.get()); assertEquals(2, preQuery.get()); assertEquals(2, failedFetch.get()); @@ -255,7 +257,7 @@ public void validateSearchContext(SearchContext context, TransportRequest reques assertEquals(0, freeScrollContext.get()); assertEquals(0, validateSearchContext.get()); - compositeListener.onFreeScrollContext(ctx); + compositeListener.onFreeScrollContext(mock(ReaderContext.class)); assertEquals(2, preFetch.get()); assertEquals(2, preQuery.get()); assertEquals(2, failedFetch.get()); @@ -269,10 +271,10 @@ public void validateSearchContext(SearchContext context, TransportRequest reques assertEquals(0, validateSearchContext.get()); if (throwingListeners == 0) { - compositeListener.validateSearchContext(ctx, Empty.INSTANCE); + compositeListener.validateSearchContext(mock(ReaderContext.class), Empty.INSTANCE); } else { - RuntimeException expected = - expectThrows(RuntimeException.class, () -> compositeListener.validateSearchContext(ctx, Empty.INSTANCE)); + RuntimeException expected = expectThrows(RuntimeException.class, + () -> compositeListener.validateSearchContext(mock(ReaderContext.class), Empty.INSTANCE)); assertNull(expected.getMessage()); assertEquals(throwingListeners - 1, expected.getSuppressed().length); if (throwingListeners > 1) { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index fffc0aa24148a..28b322c77fac3 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -79,6 +79,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -204,7 +205,7 @@ public IndexMetadata 
upgradeIndexMetadata(IndexMetadata indexMetadata, Version m Map<ActionType, TransportAction> actions = new HashMap<>(); actions.put(TransportVerifyShardBeforeCloseAction.TYPE, new TransportVerifyShardBeforeCloseAction(SETTINGS, transportService, clusterService, indicesService, threadPool, null, actionFilters)); - client.initialize(actions, transportService.getTaskManager(), null, null); + client.initialize(actions, transportService.getTaskManager(), null, null, new NamedWriteableRegistry(List.of())); ShardLimitValidator shardLimitValidator = new ShardLimitValidator(SETTINGS, clusterService); MetadataIndexStateService indexStateService = new MetadataIndexStateService(clusterService, allocationService, diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 4813e11e15bfc..6711beb52bdca 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -44,6 +45,7 @@ import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; @@ -80,7 +82,7 @@ protected void doExecute(Task task, ActionRequest request, ActionListener listen final Map<ActionType, TransportAction> actions = new HashMap<>(); actions.put(ValidateQueryAction.INSTANCE, transportAction); - client.initialize(actions, taskManager, () -> "local", null); + client.initialize(actions, taskManager, () -> "local", null, new NamedWriteableRegistry(List.of())); controller.registerHandler(action); } diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index 5be92101e6360..91ef4c996ffe8 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -51,15 +50,20 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.ScrollContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.LegacyReaderContext; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.rescore.RescoreContext; import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.test.ESTestCase;
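// DefaultSearchContext is now built on a ReaderContext; the TestThreadPool imported below backs the new indexShard.getThreadPool() stub, which the reader context appears to need (e.g. for tracking access time).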
+import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; import java.util.UUID; +import java.util.function.Function; +import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; import static org.mockito.Matchers.anyObject; @@ -77,9 +81,11 @@ public void testPreProcess() throws Exception { ShardId shardId = new ShardId("index", UUID.randomUUID().toString(), 1); when(shardSearchRequest.shardId()).thenReturn(shardId); + ThreadPool threadPool = new TestThreadPool(this.getClass().getName()); IndexShard indexShard = mock(IndexShard.class); QueryCachingPolicy queryCachingPolicy = mock(QueryCachingPolicy.class); when(indexShard.getQueryCachingPolicy()).thenReturn(queryCachingPolicy); + when(indexShard.getThreadPool()).thenReturn(threadPool); int maxResultWindow = randomIntBetween(50, 100); int maxRescoreWindow = randomIntBetween(50, 100); @@ -112,27 +118,49 @@ public void testPreProcess() throws Exception { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); try (Directory dir = newDirectory(); - RandomIndexWriter w = new RandomIndexWriter(random(), dir); - IndexReader reader = w.getReader(); - Engine.Searcher searcher = new Engine.Searcher("test", reader, - IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), - IndexSearcher.getDefaultQueryCachingPolicy(), reader)) { + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + + + final Supplier<Engine.SearcherSupplier> searcherSupplier = () -> new Engine.SearcherSupplier(Function.identity()) { + @Override + protected void doClose() { + } + + @Override + protected Engine.Searcher acquireSearcherInternal(String source) { + try { + IndexReader reader = w.getReader(); + return new Engine.Searcher("test", reader, IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), reader); + } catch (IOException exc) { + throw new AssertionError(exc); + } + } + }; SearchShardTarget target = new SearchShardTarget("node", shardId, null, OriginalIndices.NONE); - DefaultSearchContext context1 = new DefaultSearchContext(new SearchContextId(UUIDs.randomBase64UUID(), 1L), - shardSearchRequest, target, searcher, null, indexService, indexShard, bigArrays, null, timeout, null, false); - context1.from(300); + ReaderContext readerWithoutScroll = new ReaderContext( + randomNonNegativeLong(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false); + DefaultSearchContext contextWithoutScroll = new DefaultSearchContext(readerWithoutScroll, shardSearchRequest, target, null, + bigArrays, null, timeout, null, false); + contextWithoutScroll.from(300); + contextWithoutScroll.close(); // resultWindow greater than maxResultWindow and scrollContext is null - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> contextWithoutScroll.preProcess(false)); assertThat(exception.getMessage(), equalTo("Result window is too large, from + size must be less than or equal to:" + " [" + maxResultWindow + "] but was [310]. See the scroll api for a more efficient way to request large data sets.
" + "This limit can be set by changing the [" + IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey() + "] index level setting.")); // resultWindow greater than maxResultWindow and scrollContext isn't null - context1.scrollContext(new ScrollContext()); + when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMillis(randomInt(1000)))); + ReaderContext readerContext = new LegacyReaderContext( + randomNonNegativeLong(), indexService, indexShard, searcherSupplier.get(), shardSearchRequest, randomNonNegativeLong()); + DefaultSearchContext context1 = new DefaultSearchContext(readerContext, shardSearchRequest, target, null, + bigArrays, null, timeout, null, false); + context1.from(300); exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false)); assertThat(exception.getMessage(), equalTo("Batch size is too large, size must be less than or equal to: [" + maxResultWindow + "] but was [310]. Scroll batch sizes cost as much memory as result windows so they are " @@ -160,9 +188,12 @@ public void testPreProcess() throws Exception { + "to be rescored. This limit can be set by changing the [" + IndexSettings.MAX_RESCORE_WINDOW_SETTING.getKey() + "] index level setting.")); + readerContext.close(); + readerContext = new ReaderContext( + randomNonNegativeLong(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false); // rescore is null but sliceBuilder is not null - DefaultSearchContext context2 = new DefaultSearchContext(new SearchContextId(UUIDs.randomBase64UUID(), 2L), - shardSearchRequest, target, searcher, null, indexService, indexShard, bigArrays, null, timeout, null, false); + DefaultSearchContext context2 = new DefaultSearchContext(readerContext, shardSearchRequest, target, + null, bigArrays, null, timeout, null, false); SliceBuilder sliceBuilder = mock(SliceBuilder.class); int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100); @@ -178,8 +209,8 @@ public void testPreProcess() throws Exception { when(shardSearchRequest.getAliasFilter()).thenReturn(AliasFilter.EMPTY); when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST); - DefaultSearchContext context3 = new DefaultSearchContext(new SearchContextId(UUIDs.randomBase64UUID(), 3L), - shardSearchRequest, target, searcher, null, indexService, indexShard, bigArrays, null, timeout, null, false); + DefaultSearchContext context3 = new DefaultSearchContext(readerContext, shardSearchRequest, target, null, + bigArrays, null, timeout, null, false); ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); @@ -188,14 +219,19 @@ public void testPreProcess() throws Exception { when(queryShardContext.fieldMapper(anyString())).thenReturn(mock(MappedFieldType.class)); when(shardSearchRequest.indexRoutings()).thenReturn(new String[0]); - DefaultSearchContext context4 = new DefaultSearchContext(new SearchContextId(UUIDs.randomBase64UUID(), 4L), - shardSearchRequest, target, searcher, null, indexService, indexShard, bigArrays, null, timeout, null, false); + readerContext.close(); + readerContext = new ReaderContext(randomNonNegativeLong(), indexService, indexShard, + searcherSupplier.get(), randomNonNegativeLong(), false); + DefaultSearchContext context4 = + new DefaultSearchContext(readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false); context4.sliceBuilder(new 
SliceBuilder(1,2)).parsedQuery(parsedQuery).preProcess(false); Query query1 = context4.query(); context4.sliceBuilder(new SliceBuilder(0,2)).parsedQuery(parsedQuery).preProcess(false); Query query2 = context4.query(); assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); + readerContext.close(); + threadPool.shutdown(); } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index c45f875a56768..300ba1e504578 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -79,12 +80,14 @@ import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchRequest; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; import java.io.IOException; @@ -173,17 +176,6 @@ protected Map, Object>> pluginScripts() { @Override public void onIndexModule(IndexModule indexModule) { indexModule.addSearchOperationListener(new SearchOperationListener() { - @Override - public void onNewContext(SearchContext context) { - if (context.query() != null) { - if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { - assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search_throttled]")); - } else { - assertThat(Thread.currentThread().getName(), startsWith("elasticsearch[node_s_0][search]")); - } - } - } - @Override public void onFetchPhase(SearchContext context, long tookInNanos) { if ("throttled_threadpool_index".equals(context.indexShard().shardId().getIndex().getName())) { @@ -322,6 +314,7 @@ public void onFailure(Exception e) { new ShardSearchRequest(OriginalIndices.NONE, useScroll ? scrollSearchRequest : searchRequest, indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null), + true, new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), result); SearchPhaseResult searchPhaseResult = result.get(); IntArrayList intCursors = new IntArrayList(1); @@ -332,7 +325,7 @@ public void onFailure(Exception e) { listener.get(); if (useScroll) { // have to free context since this test does not remove the index from IndicesService. 
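// (the free call below now goes through freeReaderContext: the reader context owns the retained searcher and its keep-alive)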
- service.freeContext(searchPhaseResult.getContextId()); + service.freeReaderContext(searchPhaseResult.getContextId()); } } catch (ExecutionException ex) { assertThat(ex.getCause(), instanceOf(RuntimeException.class)); @@ -341,7 +334,7 @@ public void onFailure(Exception e) { } catch (AlreadyClosedException ex) { throw ex; } catch (IllegalStateException ex) { - assertEquals("search context is already closed can't increment refCount current count [0]", ex.getMessage()); + assertEquals("reader_context is already closed can't increment refCount current count [0]", ex.getMessage()); } catch (SearchContextMissingException ex) { // that's fine } @@ -389,7 +382,7 @@ public void testSearchWhileIndexDeletedDoesNotLeakSearchContext() throws Executi new ShardSearchRequest(OriginalIndices.NONE, useScroll ? scrollSearchRequest : searchRequest, new ShardId(resolveIndex("index"), 0), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null), - new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), result); + randomBoolean(), new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()), result); try { result.get(); @@ -414,42 +407,34 @@ public void testTimeout() throws IOException { final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); final IndexShard indexShard = indexService.getShard(0); SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); - final SearchContext contextWithDefaultTimeout = service.createContext( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 1, - new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, -1, null, null - ), null); - try { + final ShardSearchRequest requestWithDefaultTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, -1, null, null); + + try (ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext contextWithDefaultTimeout = service.createContext(reader, requestWithDefaultTimeout, null, randomBoolean())) { // the search context should inherit the default timeout assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5))); - } finally { - contextWithDefaultTimeout.decRef(); - service.freeContext(contextWithDefaultTimeout.id()); } final long seconds = randomIntBetween(6, 10); searchRequest.source(new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds))); - final SearchContext context = service.createContext( - new ShardSearchRequest( - OriginalIndices.NONE, - searchRequest, - indexShard.shardId(), - 1, - new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, -1, null, null - ), null); - try { + final ShardSearchRequest requestWithCustomTimeout = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, -1, null, null); + try (ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, requestWithCustomTimeout, null, randomBoolean())) { // the search context should inherit the query timeout assertThat(context.timeout(), equalTo(TimeValue.timeValueSeconds(seconds))); - } finally { - context.decRef(); - service.freeContext(context.id()); } - } /** @@ -469,19 +454,20 @@ public void testMaxDocvalueFieldsSearch() throws IOException { for (int i = 0; i < indexService.getIndexSettings().getMaxDocvalueFields(); i++) { 
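// fill the source with exactly the allowed number of docvalue fields; adding one more below must trip the validation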
searchSourceBuilder.docValueField("field" + i); } - try (SearchContext context = service.createContext( - new ShardSearchRequest(OriginalIndices.NONE, - searchRequest, indexShard.shardId(), 1, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null - ), null)) { + final ShardSearchRequest request = new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null); + try (ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, null, randomBoolean())) { assertNotNull(context); - searchSourceBuilder.docValueField("one_field_too_much"); + } + searchSourceBuilder.docValueField("one_field_too_much"); + try (ReaderContext reader = createReaderContext(indexService, indexShard)) { IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null), null)); + () -> service.createContext(reader, request, null, randomBoolean())); assertEquals( "Trying to retrieve too many docvalue_fields. Must be less than or equal to: [100] but was [101]. " - + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", ex.getMessage()); + + "This limit can be set by changing the [index.max_docvalue_fields_search] index level setting.", + ex.getMessage()); } } @@ -504,15 +490,17 @@ public void testMaxScriptFieldsSearch() throws IOException { searchSourceBuilder.scriptField("field" + i, new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); } - try (SearchContext context = service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, - indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, -1, null, null), null)) { - assertNotNull(context); + final ShardSearchRequest request = new ShardSearchRequest(OriginalIndices.NONE, searchRequest, + indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null); + + try(ReaderContext reader = createReaderContext(indexService, indexShard)) { + try (SearchContext context = service.createContext(reader, request, null, randomBoolean())) { + assertNotNull(context); + } searchSourceBuilder.scriptField("anotherScriptField", new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, - () -> service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, - new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null), null)); + () -> service.createContext(reader, request, null, randomBoolean())); assertEquals( "Trying to retrieve too many script_fields. 
Must be less than or equal to: [" + maxScriptFields + "] but was [" + (maxScriptFields + 1) @@ -534,17 +522,19 @@ public void testIgnoreScriptfieldIfSizeZero() throws IOException { searchSourceBuilder.scriptField("field" + 0, new Script(ScriptType.INLINE, MockScriptEngine.NAME, CustomScriptPlugin.DUMMY_SCRIPT, Collections.emptyMap())); searchSourceBuilder.size(0); - try (SearchContext context = service.createContext(new ShardSearchRequest(OriginalIndices.NONE, + final ShardSearchRequest request = new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), - 1.0f, -1, null, null), null)) { - assertEquals(0, context.scriptFields().fields().size()); + 1.0f, -1, null, null); + try (ReaderContext reader = createReaderContext(indexService, indexShard); + SearchContext context = service.createContext(reader, request, null, randomBoolean())) { + assertEquals(0, context.scriptFields().fields().size()); } } /** * test that creating more than the allowed number of scroll contexts throws an exception */ - public void testMaxOpenScrollContexts() throws RuntimeException { + public void testMaxOpenScrollContexts() throws Exception { createIndex("index"); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); @@ -570,8 +560,10 @@ public void testMaxOpenScrollContexts() throws RuntimeException { client().prepareSearch("index").setSize(1).setScroll("1m").get(); } + final ShardScrollRequestTest request = new ShardScrollRequestTest(indexShard.shardId()); ElasticsearchException ex = expectThrows(ElasticsearchException.class, - () -> service.createAndPutContext(new ShardScrollRequestTest(indexShard.shardId()), null)); + () -> service.createAndPutReaderContext( + request, indexService, indexShard, indexShard.acquireSearcherSupplier(), randomBoolean())); assertEquals( "Trying to create too many scroll contexts. Must be less than or equal to: [" + SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY) + "]. " + @@ -584,7 +576,8 @@ public void testMaxOpenScrollContexts() throws RuntimeException { public void testOpenScrollContextsConcurrently() throws Exception { createIndex("index"); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexShard indexShard = indicesService.indexServiceSafe(resolveIndex("index")).getShard(0); + final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); + final IndexShard indexShard = indexService.getShard(0); final int maxScrollContexts = SearchService.MAX_OPEN_SCROLL_CONTEXT.get(Settings.EMPTY); final SearchService searchService = getInstanceFromNode(SearchService.class); @@ -596,8 +589,10 @@ public void testOpenScrollContextsConcurrently() throws Exception { try { latch.await(); for (; ; ) { + final Engine.SearcherSupplier reader = indexShard.acquireSearcherSupplier(); try { - searchService.createAndPutContext(new ShardScrollRequestTest(indexShard.shardId()), null); + searchService.createAndPutReaderContext( + new ShardScrollRequestTest(indexShard.shardId()), indexService, indexShard, reader, true); } catch (ElasticsearchException e) { assertThat(e.getMessage(), equalTo( "Trying to create too many scroll contexts. 
Must be less than or equal to: " + @@ -690,7 +685,7 @@ public Scroll scroll() { } } - public void testCanMatch() throws IOException, InterruptedException { + public void testCanMatch() throws Exception { createIndex("index"); final SearchService service = getInstanceFromNode(SearchService.class); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -741,7 +736,7 @@ public void testCanMatch() throws IOException, InterruptedException { CountDownLatch latch = new CountDownLatch(1); SearchShardTask task = new SearchShardTask(123L, "", "", "", null, Collections.emptyMap()); - service.executeQueryPhase(request, task, new ActionListener() { + service.executeQueryPhase(request, randomBoolean(), task, new ActionListener() { @Override public void onResponse(SearchPhaseResult searchPhaseResult) { try { @@ -890,18 +885,18 @@ public void testCreateSearchContextFailure() throws IOException { final IndexService indexService = createIndex(index); final SearchService service = getInstanceFromNode(SearchService.class); final ShardId shardId = new ShardId(indexService.index(), 0); - IndexShard indexShard = indexService.getShard(0); - - NullPointerException e = expectThrows(NullPointerException.class, - () -> service.createContext( - new ShardSearchRequest(shardId, 0, AliasFilter.EMPTY) { - @Override - public SearchType searchType() { - // induce an artificial NPE - throw new NullPointerException("expected"); - } - }, null)); - assertEquals("expected", e.getMessage()); + final ShardSearchRequest request = new ShardSearchRequest(shardId, 0, null) { + @Override + public SearchType searchType() { + // induce an artificial NPE + throw new NullPointerException("expected"); + } + }; + try (ReaderContext reader = createReaderContext(indexService, indexService.getShard(shardId.id()))) { + NullPointerException e = expectThrows(NullPointerException.class, + () -> service.createContext(reader, request, null, randomBoolean())); + assertEquals("expected", e.getMessage()); + } assertEquals("should have 2 store refs (IndexService + InternalEngine)", 2, indexService.getShard(0).store().refCount()); } @@ -923,7 +918,7 @@ public void testMatchNoDocsEmptyResponse() throws InterruptedException { { CountDownLatch latch = new CountDownLatch(1); shardRequest.source().query(new MatchAllQueryBuilder()); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + service.executeQueryPhase(shardRequest, randomBoolean(), task, new ActionListener<>() { @Override public void onResponse(SearchPhaseResult result) { try { @@ -953,7 +948,7 @@ public void onFailure(Exception exc) { { CountDownLatch latch = new CountDownLatch(1); shardRequest.source().query(new MatchNoneQueryBuilder()); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + service.executeQueryPhase(shardRequest, randomBoolean(), task, new ActionListener<>() { @Override public void onResponse(SearchPhaseResult result) { try { @@ -983,7 +978,7 @@ public void onFailure(Exception exc) { { CountDownLatch latch = new CountDownLatch(1); shardRequest.canReturnNullResponseIfMatchNoDocs(true); - service.executeQueryPhase(shardRequest, task, new ActionListener<>() { + service.executeQueryPhase(shardRequest, randomBoolean(), task, new ActionListener<>() { @Override public void onResponse(SearchPhaseResult result) { try { @@ -1046,32 +1041,55 @@ public void testLookUpSearchContext() throws Exception { IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = 
indicesService.indexServiceSafe(resolveIndex("index")); IndexShard indexShard = indexService.getShard(0); - ShardSearchRequest shardSearchRequest = new ShardSearchRequest( - OriginalIndices.NONE, new SearchRequest().allowPartialSearchResults(true), - indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null); - List contextIds = new ArrayList<>(); + List contextIds = new ArrayList<>(); int numContexts = randomIntBetween(1, 10); - for (int i = 0; i < numContexts; i++) { - final SearchContext searchContext = searchService.createContext(shardSearchRequest, null); - assertThat(searchContext.id().getId(), equalTo((long) (i + 1))); - searchService.putContext(searchContext); - contextIds.add(searchContext.id()); - } - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - while (contextIds.isEmpty() == false) { - final SearchContextId contextId = randomFrom(contextIds); - assertFalse(searchService.freeContext(new SearchContextId(UUIDs.randomBase64UUID(), contextId.getId()))); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - if (randomBoolean()) { - assertTrue(searchService.freeContext(contextId)); - } else { - assertTrue(searchService.freeContext((new SearchContextId("", contextId.getId())))); + CountDownLatch latch = new CountDownLatch(1); + indexShard.getThreadPool().executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + for (int i = 0; i < numContexts; i++) { + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, -1, null, null); + final ReaderContext context = searchService.createAndPutReaderContext(request, indexService, indexShard, + indexShard.acquireSearcherSupplier(), randomBoolean()); + assertThat(context.id().getId(), equalTo((long) (i + 1))); + contextIds.add(context.id()); + } + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + while (contextIds.isEmpty() == false) { + final ShardSearchContextId contextId = randomFrom(contextIds); + assertFalse(searchService.freeReaderContext(new ShardSearchContextId(UUIDs.randomBase64UUID(), contextId.getId()))); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + if (randomBoolean()) { + assertTrue(searchService.freeReaderContext(contextId)); + } else { + assertTrue(searchService.freeReaderContext((new ShardSearchContextId("", contextId.getId())))); + } + contextIds.remove(contextId); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + assertFalse(searchService.freeReaderContext(new ShardSearchContextId("", contextId.getId()))); + assertFalse(searchService.freeReaderContext(contextId)); + assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); + } + } finally { + latch.countDown(); } - contextIds.remove(contextId); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - assertFalse(searchService.freeContext(new SearchContextId("", contextId.getId()))); - assertFalse(searchService.freeContext(contextId)); - assertThat(searchService.getActiveContexts(), equalTo(contextIds.size())); - } + }); + latch.await(); + } + + public void testOpenReaderContext() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), 
TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + private ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { + return new ReaderContext(randomNonNegativeLong(), indexService, indexShard, + indexShard.acquireSearcherSupplier(), randomNonNegativeLong(), false); } } diff --git a/server/src/test/java/org/elasticsearch/search/internal/ScrollContextTests.java b/server/src/test/java/org/elasticsearch/search/internal/ScrollContextTests.java deleted file mode 100644 index de4863dd92a08..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/internal/ScrollContextTests.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.internal; - -import org.elasticsearch.test.ESTestCase; - -public class ScrollContextTests extends ESTestCase { - - public void testStoringObjectsInScrollContext() { - final ScrollContext scrollContext = new ScrollContext(); - final String key = randomAlphaOfLengthBetween(1, 16); - assertNull(scrollContext.getFromContext(key)); - - final String value = randomAlphaOfLength(6); - scrollContext.putInContext(key, value); - - assertEquals(value, scrollContext.getFromContext(key)); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java index 179b3657aea40..134c865bdc5da 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QueryPhaseTests.java @@ -320,13 +320,12 @@ public void testInOrderScrollOptimization() throws Exception { } w.close(); IndexReader reader = DirectoryReader.open(dir); - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); scrollContext.lastEmittedDoc = null; scrollContext.maxScore = Float.NaN; scrollContext.totalHits = null; - context.scrollContext(scrollContext); context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); int size = randomIntBetween(2, 5); context.setSize(size); @@ -583,13 +582,12 @@ public void testIndexSortScrollOptimization() throws Exception { // search sort is a prefix of the index sort searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new 
DocValueFormat[]{DocValueFormat.RAW})); for (SortAndFormats searchSortAndFormat : searchSortAndFormats) { - TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader)); - context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); ScrollContext scrollContext = new ScrollContext(); + TestSearchContext context = new TestSearchContext(null, indexShard, newContextSearcher(reader), scrollContext); + context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery())); scrollContext.lastEmittedDoc = null; scrollContext.maxScore = Float.NaN; scrollContext.totalHits = null; - context.scrollContext(scrollContext); context.setTask(new SearchShardTask(123L, "", "", "", null, Collections.emptyMap())); context.setSize(10); context.sort(searchSortAndFormat); diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java index d869835844786..0eede324c9437 100644 --- a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -24,6 +24,10 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.OriginalIndicesTests; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.settings.Settings; @@ -33,7 +37,9 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregationsTests; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.suggest.SuggestTests; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -51,8 +57,11 @@ public QuerySearchResultTests() { private static QuerySearchResult createTestInstance() throws Exception { ShardId shardId = new ShardId("index", "uuid", randomInt()); - QuerySearchResult result = new QuerySearchResult(new SearchContextId("", randomLong()), - new SearchShardTarget("node", shardId, null, OriginalIndices.NONE)); + SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); + ShardSearchRequest shardSearchRequest = new ShardSearchRequest(OriginalIndicesTests.randomOriginalIndices(), searchRequest, + shardId, 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f, randomNonNegativeLong(), null, new String[0]); + QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId(UUIDs.base64UUID(), randomLong()), + new SearchShardTarget("node", shardId, null, OriginalIndices.NONE), shardSearchRequest); if (randomBoolean()) { result.terminatedEarly(randomBoolean()); } @@ -73,7 +82,7 @@ public void testSerialization() throws Exception { QuerySearchResult querySearchResult = createTestInstance(); Version version = VersionUtils.randomVersion(random()); QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, version); - 
assertEquals(querySearchResult.getContextId(), deserialized.getContextId()); + assertEquals(querySearchResult.getContextId().getId(), deserialized.getContextId().getId()); assertNull(deserialized.getSearchShardTarget()); assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); assertEquals(querySearchResult.topDocs().topDocs.totalHits, deserialized.topDocs().topDocs.totalHits); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index bf70d88b3a072..5005aaed31f1a 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1611,7 +1611,7 @@ clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedActi actions.put(SearchAction.INSTANCE, new TransportSearchAction(client, threadPool, transportService, searchService, searchTransportService, searchPhaseController, clusterService, - actionFilters, indexNameExpressionResolver)); + actionFilters, indexNameExpressionResolver, namedWriteableRegistry)); actions.put(RestoreSnapshotAction.INSTANCE, new TransportRestoreSnapshotAction(transportService, clusterService, threadPool, restoreService, actionFilters, indexNameExpressionResolver)); @@ -1650,7 +1650,8 @@ clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedActi snapshotsService, actionFilters, indexNameExpressionResolver )); client.initialize(actions, transportService.getTaskManager(), - () -> clusterService.localNode().getId(), transportService.getRemoteClusterService()); + () -> clusterService.localNode().getId(), transportService.getRemoteClusterService(), + new NamedWriteableRegistry(List.of())); } private Repository.Factory getRepoFactory(Environment environment) { diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index 2ff981cc16e9b..0dd85660e22f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -27,7 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.FetchPhase; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; @@ -41,13 +41,13 @@ public class MockSearchService extends SearchService { */ public static class TestPlugin extends Plugin {} - private static final Map<SearchContext, Throwable> ACTIVE_SEARCH_CONTEXTS = new ConcurrentHashMap<>(); + private static final Map<ReaderContext, Throwable> ACTIVE_SEARCH_CONTEXTS = new ConcurrentHashMap<>(); - private Consumer<SearchContext> onPutContext = context -> {}; + private Consumer<ReaderContext> onPutContext = context -> {}; /** Throw an {@link AssertionError} if there are still in-flight contexts. */ public static void assertNoInFlightContext() { - final Map<SearchContext, Throwable> copy = new HashMap<>(ACTIVE_SEARCH_CONTEXTS); + final Map<ReaderContext, Throwable> copy = new HashMap<>(ACTIVE_SEARCH_CONTEXTS); if (copy.isEmpty() == false) { throw new AssertionError( "There are still [" + copy.size() @@ -59,14 +59,14 @@ public static void assertNoInFlightContext() { /** * Add an active search context to the list of tracked contexts. Package private for testing. 
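* The tracked key is now a {@link ReaderContext}, which stays open across the individual phases of a search.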
*/ - static void addActiveContext(SearchContext context) { + static void addActiveContext(ReaderContext context) { ACTIVE_SEARCH_CONTEXTS.put(context, new RuntimeException(context.toString())); } /** * Clear an active search context from the list of tracked contexts. Package private for testing. */ - static void removeActiveContext(SearchContext context) { + static void removeActiveContext(ReaderContext context) { ACTIVE_SEARCH_CONTEXTS.remove(context); } @@ -77,22 +77,22 @@ public MockSearchService(ClusterService clusterService, } @Override - protected void putContext(SearchContext context) { + protected void putReaderContext(ReaderContext context) { onPutContext.accept(context); addActiveContext(context); - super.putContext(context); + super.putReaderContext(context); } @Override - protected SearchContext removeContext(long id) { - final SearchContext removed = super.removeContext(id); + protected ReaderContext removeReaderContext(long id) { + final ReaderContext removed = super.removeReaderContext(id); if (removed != null) { removeActiveContext(removed); } return removed; } - public void setOnPutContext(Consumer<SearchContext> onPutContext) { + public void setOnPutContext(Consumer<ReaderContext> onPutContext) { this.onPutContext = onPutContext; } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index f5861c3692c3c..6b7f7cfcf5ca7 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -340,7 +340,7 @@ public boolean shouldCache(Query query) { * close their sub-aggregations. This is fairly similar to what the production code does. 
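* (addReleasable now takes a single argument; the old per-phase lifetime parameter is gone.)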
*/ releasables.add((Releasable) invocation.getArguments()[0]); return null; - }).when(searchContext).addReleasable(anyObject(), anyObject()); + }).when(searchContext).addReleasable(anyObject()); return searchContext; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 59d9abd82b4bc..1a418d1aebf57 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -51,9 +51,10 @@ import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.elasticsearch.search.internal.ContextIndexSearcher; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; @@ -113,12 +114,18 @@ public TestSearchContext(QueryShardContext queryShardContext, IndexShard indexSh } public TestSearchContext(QueryShardContext queryShardContext, IndexShard indexShard, ContextIndexSearcher searcher) { + this(queryShardContext, indexShard, searcher, null); + } + + public TestSearchContext(QueryShardContext queryShardContext, IndexShard indexShard, + ContextIndexSearcher searcher, ScrollContext scrollContext) { this.bigArrays = null; this.indexService = null; this.fixedBitSetFilterCache = null; this.indexShard = indexShard; this.queryShardContext = queryShardContext; this.searcher = searcher; + this.scrollContext = scrollContext; } public void setSearcher(ContextIndexSearcher searcher) { @@ -135,8 +142,8 @@ public Query buildFilteredQuery(Query query) { } @Override - public SearchContextId id() { - return new SearchContextId("", 0); + public ShardSearchContextId id() { + return new ShardSearchContextId("", 0); } @Override @@ -169,22 +176,11 @@ public float queryBoost() { return 0; } - @Override - public long getOriginNanoTime() { - return originNanoTime; - } - @Override public ScrollContext scrollContext() { return scrollContext; } - @Override - public SearchContext scrollContext(ScrollContext scrollContext) { - this.scrollContext = scrollContext; - return this; - } - @Override public SearchContextAggregations aggregations() { return aggregations; @@ -229,10 +225,6 @@ public List rescore() { return Collections.emptyList(); } - @Override - public void addRescore(RescoreContext rescore) { - } - @Override public boolean hasScriptFields() { return false; @@ -550,24 +542,6 @@ public SearchContext docIdsToLoad(int[] docIdsToLoad, int docsIdsToLoadFrom, int return null; } - @Override - public void accessed(long accessTime) { - } - - @Override - public long lastAccessTime() { - return 0; - } - - @Override - public long keepAlive() { - return 0; - } - - @Override - public void keepAlive(long keepAlive) { - } - @Override public DfsSearchResult dfsResult() { return null; @@ -640,4 +614,14 @@ public SearchShardTask getTask() { public boolean isCancelled() { return task.isCancelled(); } + + @Override + public void addRescore(RescoreContext rescore) { + + } + + @Override + public ReaderContext readerContext() { + throw new 
UnsupportedOperationException(); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java index 42b7a4d8b102d..435feda33280a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -25,6 +25,7 @@ import org.elasticsearch.index.engine.InternalEngine; import java.io.IOException; +import java.util.function.Function; final class MockInternalEngine extends InternalEngine { private MockEngineSupport support; @@ -81,4 +82,9 @@ public Engine.Searcher acquireSearcher(String source, SearcherScope scope) { final Engine.Searcher engineSearcher = super.acquireSearcher(source, scope); return support().wrapSearcher(engineSearcher); } + + @Override + public SearcherSupplier acquireSearcherSupplier(Function wrapper, SearcherScope scope) throws EngineException { + return super.acquireSearcherSupplier(wrapper.andThen(s -> support().wrapSearcher(s)), scope); + } } diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index 94bd637781c83..b49f1fb0acaf5 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -19,59 +19,25 @@ package org.elasticsearch.search; -import org.apache.lucene.search.Query; -import org.elasticsearch.Version; -import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.TestSearchContext; + +import static org.mockito.Mockito.mock; public class MockSearchServiceTests extends ESTestCase { - public static final IndexMetadata EMPTY_INDEX_METADATA = IndexMetadata.builder("") - .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1).numberOfReplicas(0).build(); public void testAssertNoInFlightContext() { - final long nowInMillis = randomNonNegativeLong(); - SearchContext s = new TestSearchContext(new QueryShardContext(0, - new IndexSettings(EMPTY_INDEX_METADATA, Settings.EMPTY), BigArrays.NON_RECYCLING_INSTANCE, null, null, null, null, null, - xContentRegistry(), writableRegistry(), null, null, () -> nowInMillis, null, null, () -> true, null)) { - - @Override - public SearchShardTarget shardTarget() { - return new SearchShardTarget("node", new ShardId("idx", "ignored", 0), null, OriginalIndices.NONE); - } - - @Override - public SearchType searchType() { - return SearchType.DEFAULT; - } - - @Override - public Query query() { - return Queries.newMatchAllQuery(); - } - }; - MockSearchService.addActiveContext(s); + ReaderContext reader = mock(ReaderContext.class); + MockSearchService.addActiveContext(reader); try { Throwable e = 
expectThrows(AssertionError.class, () -> MockSearchService.assertNoInFlightContext()); assertEquals("There are still [1] in-flight contexts. The first one's creation site is listed as the cause of this exception.", e.getMessage()); e = e.getCause(); - // The next line with throw an exception if the date looks wrong - assertEquals("[node][idx][0] query=[*:*]", e.getMessage()); assertEquals(MockSearchService.class.getName(), e.getStackTrace()[0].getClassName()); assertEquals(MockSearchServiceTests.class.getName(), e.getStackTrace()[1].getClassName()); } finally { - MockSearchService.removeActiveContext(s); + MockSearchService.removeActiveContext(reader); } } } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/search/PointInTimeIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/search/PointInTimeIT.java new file mode 100644 index 0000000000000..42a9ab211b844 --- /dev/null +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/core/search/PointInTimeIT.java @@ -0,0 +1,273 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.search; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeRequest; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeAction; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeRequest; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeResponse; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.arrayWithSize; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class PointInTimeIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int 
nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(randomIntBetween(100, 500))) + .build(); + } + + @Override + protected Collection> nodePlugins() { + final List> plugins = new ArrayList<>(); + plugins.add(LocalStateCompositeXPackPlugin.class); + return plugins; + } + + public void testBasic() { + createIndex("test"); + int numDocs = randomIntBetween(10, 50); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + client().prepareIndex("test").setId(id).setSource("value", i).get(); + } + refresh("test"); + String readerId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); + SearchResponse resp1 = client().prepareSearch().setPreference(null).setSearchContext(readerId, TimeValue.timeValueMinutes(2)).get(); + assertThat(resp1.pointInTimeId(), equalTo(readerId)); + assertHitCount(resp1, numDocs); + int deletedDocs = 0; + for (int i = 0; i < numDocs; i++) { + if (randomBoolean()) { + String id = Integer.toString(i); + client().prepareDelete("test", id).get(); + deletedDocs++; + } + } + refresh("test"); + if (randomBoolean()) { + SearchResponse resp2 = client().prepareSearch("test").setPreference(null).setQuery(new MatchAllQueryBuilder()).get(); + assertNoFailures(resp2); + assertHitCount(resp2, numDocs - deletedDocs); + } + try { + SearchResponse resp3 = client().prepareSearch() + .setPreference(null) + .setQuery(new MatchAllQueryBuilder()) + .setSearchContext(resp1.pointInTimeId(), TimeValue.timeValueMinutes(2)) + .get(); + assertNoFailures(resp3); + assertHitCount(resp3, numDocs); + assertThat(resp3.pointInTimeId(), equalTo(readerId)); + } finally { + closePointInTime(readerId); + } + } + + public void testMultipleIndices() { + int numIndices = randomIntBetween(1, 5); + for (int i = 1; i <= numIndices; i++) { + createIndex("index-" + i); + } + int numDocs = randomIntBetween(10, 50); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + String index = "index-" + randomIntBetween(1, numIndices); + client().prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(); + String readerId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); + SearchResponse resp1 = client().prepareSearch().setPreference(null).setSearchContext(readerId, TimeValue.timeValueMinutes(2)).get(); + assertNoFailures(resp1); + assertHitCount(resp1, numDocs); + int moreDocs = randomIntBetween(10, 50); + for (int i = 0; i < moreDocs; i++) { + String id = "more-" + i; + String index = "index-" + randomIntBetween(1, numIndices); + client().prepareIndex(index).setId(id).setSource("value", i).get(); + } + refresh(); + try { + SearchResponse resp2 = client().prepareSearch().get(); + assertNoFailures(resp2); + assertHitCount(resp2, numDocs + moreDocs); + + SearchResponse resp3 = client().prepareSearch() + .setPreference(null) + .setSearchContext(resp1.pointInTimeId(), TimeValue.timeValueMinutes(1)) + .get(); + assertNoFailures(resp3); + assertHitCount(resp3, numDocs); + } finally { + closePointInTime(resp1.pointInTimeId()); + } + } + + public void testPointInTimeNotFound() throws Exception { + createIndex("index"); + int index1 = randomIntBetween(10, 50); + for (int i = 0; i < index1; i++) { + String id = Integer.toString(i); + client().prepareIndex("index").setId(id).setSource("value", i).get(); + } + refresh(); + String readerId = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); + SearchResponse 
resp1 = client().prepareSearch() + .setPreference(null) + .setSearchContext(readerId, TimeValue.timeValueMillis(randomIntBetween(0, 10))) + .get(); + assertNoFailures(resp1); + assertHitCount(resp1, index1); + if (rarely()) { + assertBusy(() -> { + final CommonStats stats = client().admin().indices().prepareStats().setSearch(true).get().getTotal(); + assertThat(stats.search.getOpenContexts(), equalTo(0L)); + }, 60, TimeUnit.SECONDS); + } else { + closePointInTime(resp1.pointInTimeId()); + } + SearchPhaseExecutionException e = expectThrows( + SearchPhaseExecutionException.class, + () -> client().prepareSearch() + .setPreference(null) + .setSearchContext(resp1.pointInTimeId(), TimeValue.timeValueMinutes(1)) + .get() + ); + for (ShardSearchFailure failure : e.shardFailures()) { + assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class)); + } + } + + public void testIndexNotFound() { + createIndex("index-1"); + createIndex("index-2"); + + int index1 = randomIntBetween(10, 50); + for (int i = 0; i < index1; i++) { + String id = Integer.toString(i); + client().prepareIndex("index-1").setId(id).setSource("value", i).get(); + } + + int index2 = randomIntBetween(10, 50); + for (int i = 0; i < index2; i++) { + String id = Integer.toString(i); + client().prepareIndex("index-2").setId(id).setSource("value", i).get(); + } + refresh(); + String readerId = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); + SearchResponse resp1 = client().prepareSearch().setPreference(null).setSearchContext(readerId, TimeValue.timeValueMinutes(2)).get(); + assertNoFailures(resp1); + assertHitCount(resp1, index1 + index2); + client().admin().indices().prepareDelete("index-1").get(); + if (randomBoolean()) { + SearchResponse resp2 = client().prepareSearch("index-*").get(); + assertNoFailures(resp2); + assertHitCount(resp2, index2); + + } + expectThrows( + IndexNotFoundException.class, + () -> client().prepareSearch() + .setPreference(null) + .setSearchContext(resp1.pointInTimeId(), TimeValue.timeValueMinutes(1)) + .get() + ); + closePointInTime(resp1.pointInTimeId()); + } + + public void testCanMatch() throws Exception { + final Settings.Builder settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(5, 10)) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.timeValueMillis(randomIntBetween(50, 100))); + assertAcked( + prepareCreate("test").setSettings(settings) + .setMapping("{\"properties\":{\"created_date\":{\"type\": \"date\", \"format\": \"yyyy-MM-dd\"}}}") + ); + ensureGreen("test"); + String readerId = openPointInTime(new String[] { "test*" }, TimeValue.timeValueMinutes(2)); + try { + for (String node : internalCluster().nodesInclude("test")) { + for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { + for (IndexShard indexShard : indexService) { + assertBusy(() -> assertTrue(indexShard.isSearchIdle())); + } + } + } + client().prepareIndex("test").setId("1").setSource("created_date", "2020-01-01").get(); + SearchResponse resp = client().prepareSearch() + .setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) + .setSearchType(SearchType.QUERY_THEN_FETCH) + .setPreference(null) + .setPreFilterShardSize(randomIntBetween(2, 3)) + .setMaxConcurrentShardRequests(randomIntBetween(1, 2)) + .setSearchContext(readerId, TimeValue.timeValueMinutes(2)) + .get(); + 
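// the created_date range matches no document, and with a point in time the can_match
// phase runs against the already-open readers, so the idle shards never acquire new searchers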
assertThat(resp.getHits().getHits(), arrayWithSize(0)); + for (String node : internalCluster().nodesInclude("test")) { + for (IndexService indexService : internalCluster().getInstance(IndicesService.class, node)) { + for (IndexShard indexShard : indexService) { + // all shards are still search-idle as we did not acquire new searchers + assertTrue(indexShard.isSearchIdle()); + } + } + } + } finally { + closePointInTime(readerId); + } + } + + private String openPointInTime(String[] indices, TimeValue keepAlive) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest( + indices, + OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS, + keepAlive, + null, + null + ); + final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + return response.getSearchContextId(); + } + + private void closePointInTime(String readerId) { + client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 050a20a49e7e4..da3b192fbfa72 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -70,6 +70,12 @@ import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeAction; +import org.elasticsearch.xpack.core.search.action.RestClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.RestOpenPointInTimeAction; +import org.elasticsearch.xpack.core.search.action.TransportClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.TransportOpenPointInTimeAction; import org.elasticsearch.xpack.core.security.authc.TokenMetadata; import org.elasticsearch.xpack.core.ssl.SSLConfiguration; import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; @@ -272,6 +278,8 @@ public Collection createComponents(Client client, ClusterService cluster actions.addAll(licensing.getActions()); actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class)); actions.add(new ActionHandler<>(DeleteAsyncResultAction.INSTANCE, TransportDeleteAsyncResultAction.class)); + actions.add(new ActionHandler<>(OpenPointInTimeAction.INSTANCE, TransportOpenPointInTimeAction.class)); + actions.add(new ActionHandler<>(ClosePointInTimeAction.INSTANCE, TransportClosePointInTimeAction.class)); return actions; } @@ -310,6 +318,8 @@ public List getRestHandlers(Settings settings, RestController restC handlers.add(new RestReloadAnalyzersAction()); handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster)); + handlers.add(new RestOpenPointInTimeAction()); + handlers.add(new RestClosePointInTimeAction()); return handlers; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeAction.java new file mode 100644 index 0000000000000..ff561f7e3962d 
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeAction.java
@@ -0,0 +1,19 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionType;
+
+public class ClosePointInTimeAction extends ActionType<ClosePointInTimeResponse> {
+
+    public static final ClosePointInTimeAction INSTANCE = new ClosePointInTimeAction();
+    public static final String NAME = "indices:data/read/close_point_in_time";
+
+    private ClosePointInTimeAction() {
+        super(NAME, ClosePointInTimeResponse::new);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeRequest.java
new file mode 100644
index 0000000000000..8a9808a72521a
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeRequest.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public class ClosePointInTimeRequest extends ActionRequest implements ToXContentObject {
+    private static final ParseField ID = new ParseField("id");
+
+    private final String id;
+
+    public ClosePointInTimeRequest(StreamInput in) throws IOException {
+        super(in);
+        this.id = in.readString();
+    }
+
+    public ClosePointInTimeRequest(String id) {
+        this.id = id;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        if (Strings.isEmpty(id)) {
+            throw new IllegalArgumentException("reader id must be specified");
+        }
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeString(id);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(ID.getPreferredName(), id);
+        builder.endObject();
+        return builder;
+    }
+
+    public static ClosePointInTimeRequest fromXContent(XContentParser parser) throws IOException {
+        if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
+            throw new IllegalArgumentException("Malformed content, must start with an object");
+        } else {
+            XContentParser.Token token;
+            String id = null;
+            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+                if (token == XContentParser.Token.FIELD_NAME && parser.currentName().equals(ID.getPreferredName())) {
+                    token = parser.nextToken();
+                    if (token.isValue() == false) {
+                        throw new IllegalArgumentException("the request must contain only the [" + ID.getPreferredName() + "] field");
+                    }
+                    id = parser.text();
+                } else {
+                    throw new IllegalArgumentException("Unknown parameter [" + parser.currentName()
+                        + "] in request body or parameter is of the wrong type [" + token + "]");
+                }
+            }
+            if (Strings.isNullOrEmpty(id)) {
+                throw new IllegalArgumentException("search context id is not provided");
+            }
+            return new ClosePointInTimeRequest(id);
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeResponse.java
new file mode 100644
index 0000000000000..51c657e9e9a66
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/ClosePointInTimeResponse.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+public class ClosePointInTimeResponse extends ClearScrollResponse {
+    public ClosePointInTimeResponse(boolean succeeded, int numFreed) {
+        super(succeeded, numFreed);
+    }
+
+    public ClosePointInTimeResponse(StreamInput in) throws IOException {
+        super(in);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeAction.java
new file mode 100644
index 0000000000000..8776b588eb934
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeAction.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionType;
+
+public class OpenPointInTimeAction extends ActionType<OpenPointInTimeResponse> {
+    public static final String NAME = "indices:data/read/open_point_in_time";
+    public static final OpenPointInTimeAction INSTANCE = new OpenPointInTimeAction();
+
+    private OpenPointInTimeAction() {
+        super(NAME, OpenPointInTimeResponse::new);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeRequest.java
new file mode 100644
index 0000000000000..89094b5f1a8c9
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeRequest.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.core.search.action; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.search.SearchTask; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public final class OpenPointInTimeRequest extends ActionRequest implements IndicesRequest.Replaceable { + private String[] indices; + private final IndicesOptions indicesOptions; + private final TimeValue keepAlive; + + @Nullable + private final String routing; + @Nullable + private final String preference; + + public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed(); + + public OpenPointInTimeRequest(String[] indices, IndicesOptions indicesOptions, + TimeValue keepAlive, String routing, String preference) { + this.indices = Objects.requireNonNull(indices); + this.indicesOptions = Objects.requireNonNull(indicesOptions); + this.keepAlive = keepAlive; + this.routing = routing; + this.preference = preference; + } + + public OpenPointInTimeRequest(StreamInput in) throws IOException { + super(in); + this.indices = in.readStringArray(); + this.indicesOptions = IndicesOptions.readIndicesOptions(in); + this.keepAlive = in.readTimeValue(); + this.routing = in.readOptionalString(); + this.preference = in.readOptionalString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeTimeValue(keepAlive); + out.writeOptionalString(routing); + out.writeOptionalString(preference); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indices.length == 0) { + validationException = addValidationError("[index] is not specified", validationException); + } + if (keepAlive == null) { + validationException = addValidationError("[keep_alive] is not specified", validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public OpenPointInTimeRequest indices(String... 
indices) {
+        this.indices = indices;
+        return this;
+    }
+
+    @Override
+    public IndicesOptions indicesOptions() {
+        return indicesOptions;
+    }
+
+    public TimeValue keepAlive() {
+        return keepAlive;
+    }
+
+    public String routing() {
+        return routing;
+    }
+
+    public String preference() {
+        return preference;
+    }
+
+    @Override
+    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+        return new SearchTask(id, type, action, null, parentTaskId, headers) {
+            @Override
+            public String getDescription() {
+                return "open search context: indices [" + String.join(",", indices) + "] keep_alive [" + keepAlive + "]";
+            }
+        };
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeResponse.java
new file mode 100644
index 0000000000000..bf04039687816
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/OpenPointInTimeResponse.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject {
+    private static final ParseField ID = new ParseField("id");
+
+    private final String searchContextId;
+
+    public OpenPointInTimeResponse(String searchContextId) {
+        this.searchContextId = searchContextId;
+    }
+
+    public OpenPointInTimeResponse(StreamInput in) throws IOException {
+        super(in);
+        searchContextId = in.readString();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(searchContextId);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(ID.getPreferredName(), searchContextId);
+        builder.endObject();
+        return builder;
+    }
+
+    public String getSearchContextId() {
+        return searchContextId;
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestClosePointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestClosePointInTimeAction.java
new file mode 100644
index 0000000000000..704b3ed7c5b95
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestClosePointInTimeAction.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestStatusToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.DELETE;
+
+public class RestClosePointInTimeAction extends BaseRestHandler {
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(DELETE, "/_pit"));
+    }
+
+    @Override
+    public String getName() {
+        return "close_point_in_time";
+    }
+
+    @Override
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        final ClosePointInTimeRequest clearRequest;
+        try (XContentParser parser = request.contentOrSourceParamParser()) {
+            clearRequest = ClosePointInTimeRequest.fromXContent(parser);
+        }
+        return channel -> client.execute(ClosePointInTimeAction.INSTANCE, clearRequest, new RestStatusToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestOpenPointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestOpenPointInTimeAction.java
new file mode 100644
index 0000000000000..ba2c2721929bb
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/RestOpenPointInTimeAction.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.rest.RestRequest.Method.POST;
+
+public class RestOpenPointInTimeAction extends BaseRestHandler {
+
+    @Override
+    public String getName() {
+        return "open_point_in_time";
+    }
+
+    @Override
+    public List<Route> routes() {
+        return List.of(new Route(POST, "/{index}/_pit"));
+    }
+
+    @Override
+    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+        final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS);
+        final String routing = request.param("routing");
+        final String preference = request.param("preference");
+        final TimeValue keepAlive = TimeValue.parseTimeValue(request.param("keep_alive"), null, "keep_alive");
+        final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indices, indicesOptions, keepAlive, routing, preference);
+        return channel -> client.execute(OpenPointInTimeAction.INSTANCE, openRequest, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportClosePointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportClosePointInTimeAction.java
new file mode 100644
index 0000000000000..02ac971c9271a
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportClosePointInTimeAction.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.ClearScrollController;
+import org.elasticsearch.action.search.SearchContextId;
+import org.elasticsearch.action.search.SearchContextIdForNode;
+import org.elasticsearch.action.search.SearchTransportService;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+
+import java.util.Collection;
+
+public class TransportClosePointInTimeAction extends HandledTransportAction<ClosePointInTimeRequest, ClosePointInTimeResponse> {
+
+    private final ClusterService clusterService;
+    private final SearchTransportService searchTransportService;
+    private final NamedWriteableRegistry namedWriteableRegistry;
+
+    @Inject
+    public TransportClosePointInTimeAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ActionFilters actionFilters,
+        SearchTransportService searchTransportService,
+        NamedWriteableRegistry namedWriteableRegistry
+    ) {
+        super(ClosePointInTimeAction.NAME, transportService, actionFilters, ClosePointInTimeRequest::new);
+        this.clusterService = clusterService;
+        this.searchTransportService = searchTransportService;
+        this.namedWriteableRegistry = namedWriteableRegistry;
+    }
+
+    @Override
+    protected void doExecute(Task task, ClosePointInTimeRequest request, ActionListener<ClosePointInTimeResponse> listener) {
+        final SearchContextId searchContextId = SearchContextId.decode(namedWriteableRegistry, request.getId());
+        final Collection<SearchContextIdForNode> contextIds = searchContextId.shards().values();
+        ClearScrollController.closeContexts(
+            clusterService.state().nodes(),
+            searchTransportService,
+            contextIds,
+            ActionListener.map(listener, freed -> new ClosePointInTimeResponse(freed == contextIds.size(), freed))
+        );
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportOpenPointInTimeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportOpenPointInTimeAction.java
new file mode 100644
index 0000000000000..d6cb1ee75916f
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/TransportOpenPointInTimeAction.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.search.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionListenerResponseHandler;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.OriginalIndices;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.ChannelActionListener;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.SearchPhaseResult;
+import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.internal.ShardSearchContextId;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportActionProxy;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+
+public class TransportOpenPointInTimeAction extends HandledTransportAction<OpenPointInTimeRequest, OpenPointInTimeResponse> {
+    public static final String OPEN_SHARD_READER_CONTEXT_NAME = "indices:data/read/open_reader_context";
+
+    private final TransportSearchAction transportSearchAction;
+    private final TransportService transportService;
+    private final SearchService searchService;
+
+    @Inject
+    public TransportOpenPointInTimeAction(
+        TransportService transportService,
+        SearchService searchService,
+        ActionFilters actionFilters,
+        TransportSearchAction transportSearchAction
+    ) {
+        super(OpenPointInTimeAction.NAME, transportService, actionFilters, OpenPointInTimeRequest::new);
+        this.transportService = transportService;
+        this.transportSearchAction = transportSearchAction;
+        this.searchService = searchService;
+        transportService.registerRequestHandler(
+            OPEN_SHARD_READER_CONTEXT_NAME,
+            ThreadPool.Names.SAME,
+            ShardOpenReaderRequest::new,
+            new ShardOpenReaderRequestHandler()
+        );
+        TransportActionProxy.registerProxyAction(
+            transportService,
+            OPEN_SHARD_READER_CONTEXT_NAME,
+            TransportOpenPointInTimeAction.ShardOpenReaderResponse::new
+        );
+    }
+
+    @Override
+    protected void doExecute(Task task, OpenPointInTimeRequest request, ActionListener<OpenPointInTimeResponse> listener) {
+        final SearchRequest searchRequest = new SearchRequest().indices(request.indices())
+            .indicesOptions(request.indicesOptions())
+            .preference(request.preference())
+            .routing(request.routing())
+            .allowPartialSearchResults(false);
+        transportSearchAction.executeRequest(
+            task,
+            searchRequest,
+            "open_search_context",
+            true,
+            (searchTask, shardTarget, connection, phaseListener) -> {
+                final ShardOpenReaderRequest shardRequest = new ShardOpenReaderRequest(
+                    shardTarget.getShardId(),
+                    shardTarget.getOriginalIndices(),
+                    request.keepAlive()
+                );
+                transportService.sendChildRequest(
+                    connection,
+                    OPEN_SHARD_READER_CONTEXT_NAME,
+                    shardRequest,
+                    searchTask,
+                    new ActionListenerResponseHandler<SearchPhaseResult>(phaseListener, ShardOpenReaderResponse::new)
+                );
+            },
+            ActionListener.map(listener, r -> new OpenPointInTimeResponse(r.pointInTimeId()))
+        );
+    }
+
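Taken together, the request, response, and transport classes above give the full open/close lifecycle for a point in time. A minimal sketch of a consumer (the `client` handle, index name, and keep-alive value below are assumptions for illustration, and error handling is elided):

```
// Open a point in time against one index and keep it alive for one minute.
OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(
    new String[] { "my_index" },                    // assumed index name
    OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS,
    TimeValue.timeValueMinutes(1),                  // keep_alive
    null,                                           // routing
    null                                            // preference
);
String pitId = client.execute(OpenPointInTimeAction.INSTANCE, openRequest).actionGet().getSearchContextId();
try {
    // ... issue search requests that reference pitId while the context is kept alive ...
} finally {
    // Free the per-shard reader contexts as soon as the point in time is no longer needed.
    ClosePointInTimeResponse closed =
        client.execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(pitId)).actionGet();
    assert closed.isSucceeded();
}
```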
+    private static final class ShardOpenReaderRequest extends TransportRequest implements IndicesRequest {
+        final ShardId shardId;
+        final OriginalIndices originalIndices;
+        final TimeValue keepAlive;
+
+        ShardOpenReaderRequest(ShardId shardId, OriginalIndices originalIndices, TimeValue keepAlive) {
+            this.shardId = shardId;
+            this.originalIndices = originalIndices;
+            this.keepAlive = keepAlive;
+        }
+
+        ShardOpenReaderRequest(StreamInput in) throws IOException {
+            super(in);
+            shardId = new ShardId(in);
+            originalIndices = OriginalIndices.readOriginalIndices(in);
+            keepAlive = in.readTimeValue();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            shardId.writeTo(out);
+            OriginalIndices.writeOriginalIndices(originalIndices, out);
+            out.writeTimeValue(keepAlive);
+        }
+
+        public ShardId getShardId() {
+            return shardId;
+        }
+
+        @Override
+        public String[] indices() {
+            return originalIndices.indices();
+        }
+
+        @Override
+        public IndicesOptions indicesOptions() {
+            return originalIndices.indicesOptions();
+        }
+    }
+
+    private static final class ShardOpenReaderResponse extends SearchPhaseResult {
+        ShardOpenReaderResponse(ShardSearchContextId contextId) {
+            this.contextId = contextId;
+        }
+
+        ShardOpenReaderResponse(StreamInput in) throws IOException {
+            super(in);
+            contextId = new ShardSearchContextId(in);
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            contextId.writeTo(out);
+        }
+    }
+
+    private class ShardOpenReaderRequestHandler implements TransportRequestHandler<ShardOpenReaderRequest> {
+        @Override
+        public void messageReceived(ShardOpenReaderRequest request, TransportChannel channel, Task task) throws Exception {
+            searchService.openReaderContext(
+                request.getShardId(),
+                request.keepAlive,
+                ActionListener.map(
+                    new ChannelActionListener<>(channel, OPEN_SHARD_READER_CONTEXT_NAME, request),
+                    ShardOpenReaderResponse::new
+                )
+            );
+        }
+    }
+}
diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
index b2fa488c8ca2d..f01cbc35ecebd 100644
--- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
+++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java
@@ -22,7 +22,7 @@ import org.elasticsearch.plugins.ActionPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.PluginsService;
-import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ReaderContext;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskInfo;
@@ -157,7 +157,7 @@ public void onIndexModule(IndexModule indexModule) {
         super.onIndexModule(indexModule);
         indexModule.addSearchOperationListener(new SearchOperationListener() {
             @Override
-            public void onNewContext(SearchContext context) {
+            public void onNewReaderContext(ReaderContext readerContext) {
                 contexts.incrementAndGet();
                 try {
                     logger.trace("blocking search on " + nodeId);
diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
index f639ccb3b2777..3ebe32c933c8b 100644
--- 
a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -5,65 +5,37 @@ */ package org.elasticsearch.index.engine; -import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; -import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.index.FilterLeafReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.LeafMetaData; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentReader; import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.Terms; import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.shard.DocsStats; -import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.index.store.Store; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.function.Function; /** - * This is a stand-alone read-only engine that maintains a lazy loaded index reader that is opened on calls to - * {@link Engine#acquireSearcher(String)}. The index reader opened is maintained until there are no reference to it anymore and then - * releases itself from the engine. The readers returned from this engine are lazy which allows release after and reset before a search - * phase starts. This allows releasing references as soon as possible on the search layer. - * - * Internally this class uses a set of wrapper abstractions to allow a reader that is used inside the {@link Engine.Searcher} returned from - * {@link #acquireSearcher(String, SearcherScope)} to release and reset it's internal resources. This is necessary to for instance release - * all SegmentReaders after a search phase finishes and reopen them before the next search phase starts. This together with a throttled - * threadpool (search_throttled) guarantees that at most N frozen shards have a low level index reader open at the same time. - * - * In particular we have LazyDirectoryReader that wraps its LeafReaders (the actual segment readers) inside LazyLeafReaders. Each of the - * LazyLeafReader delegates to segment LeafReader that can be reset (it's reference decremented and nulled out) on a search phase is - * finished. 
Before the next search phase starts we can reopen the corresponding reader and reset the reference to execute the search phase.
- * This allows the SearchContext to hold on to the same LazyDirectoryReader across its lifecycle but under the hood resources (memory) is
- * released while the SearchContext phases are not executing.
- *
+ * This is a stand-alone read-only engine that maintains an index reader that is opened lazily on calls to
+ * {@link SearcherSupplier#acquireSearcher(String)}. The index reader opened is maintained until there are no references to it anymore
+ * and then releases itself from the engine.
+ * This is necessary to, for instance, release all SegmentReaders after a search phase finishes and reopen them before the next search
+ * phase starts.
+ * This together with a throttled threadpool (search_throttled) guarantees that at most N frozen shards have a low level index reader
+ * open at the same time.
  * The internal reopen of readers is treated like a refresh and refresh listeners are called up-on reopen. This allows to consume refresh
  * stats in order to obtain the number of reopens.
  */
@@ -163,6 +135,11 @@ private synchronized void onReaderClosed(IndexReader.CacheKey key) {
         }
     }
 
+    @SuppressForbidden(reason = "we manage references explicitly here")
+    private synchronized void closeReader(IndexReader reader) throws IOException {
+        reader.decRef();
+    }
+
     private synchronized ElasticsearchDirectoryReader getOrOpenReader() throws IOException {
         ElasticsearchDirectoryReader reader = null;
         boolean success = false;
@@ -176,7 +153,7 @@ private synchronized ElasticsearchDirectoryReader getOrOpenReader() throws IOExc
             reader = lastOpenedReader = wrapReader(dirReader, Function.identity());
             processReader(reader);
             reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed);
-            for (ReferenceManager.RefreshListener listeners : config ().getInternalRefreshListener()) {
+            for (ReferenceManager.RefreshListener listeners : config().getInternalRefreshListener()) {
                 listeners.afterRefresh(true);
             }
         }
@@ -198,397 +175,63 @@ private synchronized ElasticsearchDirectoryReader getReader() {
     }
 
     @Override
-    @SuppressWarnings("fallthrough")
-    @SuppressForbidden( reason = "we manage references explicitly here")
-    public Searcher acquireSearcher(String source, SearcherScope scope) throws EngineException {
+    public SearcherSupplier acquireSearcherSupplier(Function<Searcher, Searcher> wrapper, SearcherScope scope) throws EngineException {
+        final Store store = this.store;
         store.incRef();
-        boolean releaseRefeference = true;
-        try {
-            final boolean maybeOpenReader;
-            switch (source) {
-                case "load_seq_no":
-                case "load_version":
-                    assert false : "this is a read-only engine";
-                case "doc_stats":
-                    assert false : "doc_stats are overwritten";
-                case "refresh_needed":
-                    assert false : "refresh_needed is always false";
-                case "segments":
-                case "segments_stats":
-                case "completion_stats":
-                case "can_match": // special case for can_match phase - we use the cached point values reader
-                    maybeOpenReader = false;
-                    break;
-                default:
-                    maybeOpenReader = true;
-            }
-            // special case we only want to report segment stats if we have a reader open. in that case we only get a reader if we still
-            // have one open at the time and can inc it's reference.
-            ElasticsearchDirectoryReader reader = maybeOpenReader ? getOrOpenReader() : getReader();
-            if (reader == null) {
-                // we just hand out a searcher on top of an empty reader that we opened for the ReadOnlyEngine in the #open(IndexCommit)
-                // method.
this is the case when we don't have a reader open right now and we get a stats call any other that falls in - // the category that doesn't trigger a reopen - if ("can_match".equals(source)) { - canMatchReader.incRef(); - return new Searcher(source, canMatchReader, - engineConfig.getSimilarity(), engineConfig.getQueryCache(), engineConfig.getQueryCachingPolicy(), - canMatchReader::decRef); - } - return super.acquireSearcher(source, scope); - } else { - try { - LazyDirectoryReader lazyDirectoryReader = new LazyDirectoryReader(reader, this); - Searcher newSearcher = new Searcher(source, lazyDirectoryReader, - engineConfig.getSimilarity(), engineConfig.getQueryCache(), engineConfig.getQueryCachingPolicy(), - () -> IOUtils.close(lazyDirectoryReader, store::decRef)); - releaseRefeference = false; - return newSearcher; - } finally { - if (releaseRefeference) { - reader.decRef(); // don't call close here we manage reference ourselves - } - } - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } finally { - if (releaseRefeference) { - store.decRef(); - } - } - } - - static LazyDirectoryReader unwrapLazyReader(DirectoryReader reader) { - while (reader instanceof FilterDirectoryReader) { - if (reader instanceof LazyDirectoryReader) { - return (LazyDirectoryReader) reader; - } - reader = ((FilterDirectoryReader) reader).getDelegate(); - } - return null; - } - - /* - * We register this listener for a frozen index that will - * 1. reset the reader every time the search context is validated which happens when the context is looked up ie. on a fetch phase - * etc. - * 2. register a releasable resource that is cleaned after each phase that releases the reader for this searcher - */ - public static class ReacquireEngineSearcherListener implements SearchOperationListener { - - @Override - public void validateSearchContext(SearchContext context, TransportRequest transportRequest) { - DirectoryReader dirReader = context.searcher().getDirectoryReader(); - LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(dirReader); - if (lazyDirectoryReader != null) { - try { - lazyDirectoryReader.reset(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - // also register a release resource in this case if we have multiple roundtrips like in DFS - registerRelease(context, lazyDirectoryReader); - } - } - - private void registerRelease(SearchContext context, LazyDirectoryReader lazyDirectoryReader) { - context.addReleasable(() -> { + return new SearcherSupplier(wrapper) { + @Override + @SuppressForbidden(reason = "we manage references explicitly here") + public Searcher acquireSearcherInternal(String source) { try { - lazyDirectoryReader.release(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - }, SearchContext.Lifetime.PHASE); - } - - @Override - public void onNewContext(SearchContext context) { - DirectoryReader dirReader = context.searcher().getDirectoryReader(); - LazyDirectoryReader lazyDirectoryReader = unwrapLazyReader(dirReader); - if (lazyDirectoryReader != null) { - registerRelease(context, lazyDirectoryReader); - } - } - } - - /** - * This class allows us to use the same high level reader across multiple search phases but replace the underpinnings - * on/after each search phase. This is really important otherwise we would hold on to multiple readers across phases. - * - * This reader and its leaf reader counterpart overrides FilterDirectory/LeafReader for convenience to be unwrapped but still - * overrides all it's delegate methods. 
We have tests to ensure we never miss an override but we need to in order to make sure - * the wrapper leaf readers don't register themself as close listeners on the wrapped ones otherwise we fail plugging in new readers - * on the next search phase. - */ - static final class LazyDirectoryReader extends FilterDirectoryReader { - - private final FrozenEngine engine; - private volatile DirectoryReader delegate; // volatile since it might be closed concurrently - - private LazyDirectoryReader(DirectoryReader reader, FrozenEngine engine) throws IOException { - super(reader, new SubReaderWrapper() { - @Override - public LeafReader wrap(LeafReader reader) { - return new LazyLeafReader(reader); - } - }); - this.delegate = reader; - this.engine = engine; - } - - @SuppressForbidden(reason = "we manage references explicitly here") - synchronized void release() throws IOException { - if (delegate != null) { // we are lenient here it's ok to double close - delegate.decRef(); - delegate = null; - if (tryIncRef()) { // only do this if we are not closed already - // we end up in this case when we are not closed but in an intermediate - // state were we want to release all or the real leaf readers ie. in between search phases - // but still want to keep this Lazy reference open. In oder to let the heavy real leaf - // readers to be GCed we need to null our the references. - try { - for (LeafReaderContext leaf : leaves()) { - LazyLeafReader reader = (LazyLeafReader) leaf.reader(); - reader.in = null; - } - } finally { - decRef(); - } - } - } - } - - void reset() throws IOException { - boolean success = false; - DirectoryReader reader = engine.getOrOpenReader(); - try { - reset(reader); - success = true; - } finally { - if (success == false) { - IOUtils.close(reader); + return openSearcher(source, scope); + } catch (IOException exc) { + throw new UncheckedIOException(exc); } } - } - - private synchronized void reset(DirectoryReader delegate) { - if (this.delegate != null) { - throw new AssertionError("lazy reader is not released"); - } - assert (delegate instanceof LazyDirectoryReader) == false : "must not be a LazyDirectoryReader"; - List leaves = delegate.leaves(); - int ord = 0; - for (LeafReaderContext leaf : leaves()) { - LazyLeafReader reader = (LazyLeafReader) leaf.reader(); - LeafReader newReader = leaves.get(ord++).reader(); - assert reader.in == null; - reader.in = newReader; - assert reader.info.info.equals(Lucene.segmentReader(newReader).getSegmentInfo().info); - } - this.delegate = delegate; - } - @Override - protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) { - throw new UnsupportedOperationException(); - } - - void ensureOpenOrReset() { - // ensure we fail early and with good exceptions - ensureOpen(); - if (delegate == null) { - throw new AlreadyClosedException("delegate is released"); + @Override + protected void doClose() { + store.decRef(); } - } - - @Override - public long getVersion() { - ensureOpenOrReset(); - return delegate.getVersion(); - } - - @Override - public boolean isCurrent() throws IOException { - ensureOpenOrReset(); - return delegate.isCurrent(); - } - - @Override - public IndexCommit getIndexCommit() throws IOException { - ensureOpenOrReset(); - return delegate.getIndexCommit(); - } - - @Override - protected void doClose() throws IOException { - release(); - } - - @Override - public CacheHelper getReaderCacheHelper() { - ensureOpenOrReset(); - return delegate.getReaderCacheHelper(); - } - - @Override - public DirectoryReader getDelegate() { - 
ensureOpenOrReset(); - return delegate; - } + }; } - /** - * We basically duplicate a FilterLeafReader here since we don't want the - * incoming reader to register with this reader as a parent reader. This would mean we barf if the incoming - * reader is closed and that is what we actually doing on purpose. - */ - static final class LazyLeafReader extends FilterLeafReader { - - private volatile LeafReader in; - private final SegmentCommitInfo info; - private final int numDocs; - private final int maxDocs; - - private LazyLeafReader(LeafReader in) { - super(Lucene.emptyReader(in.maxDoc())); // empty reader here to make FilterLeafReader happy - this.info = Lucene.segmentReader(in).getSegmentInfo(); - this.in = in; - numDocs = in.numDocs(); - maxDocs = in.maxDoc(); - // don't register in reader as a subreader here. - } - - private void ensureOpenOrReleased() { - ensureOpen(); - if (in == null) { - throw new AlreadyClosedException("leaf is already released"); + @SuppressWarnings("fallthrough") + @SuppressForbidden(reason = "we manage references explicitly here") + private Engine.Searcher openSearcher(String source, SearcherScope scope) throws IOException { + boolean maybeOpenReader; + switch (source) { + case "load_seq_no": + case "load_version": + assert false : "this is a read-only engine"; + case "doc_stats": + assert false : "doc_stats are overwritten"; + case "refresh_needed": + assert false : "refresh_needed is always false"; + case "segments": + case "segments_stats": + case "completion_stats": + case CAN_MATCH_SEARCH_SOURCE: // special case for can_match phase - we use the cached point values reader + maybeOpenReader = false; + break; + default: + maybeOpenReader = true; + } + ElasticsearchDirectoryReader reader = maybeOpenReader ? getOrOpenReader() : getReader(); + if (reader == null) { + if (CAN_MATCH_SEARCH_SOURCE.equals(source)) { + canMatchReader.incRef(); + return new Searcher(source, canMatchReader, engineConfig.getSimilarity(), engineConfig.getQueryCache(), + engineConfig.getQueryCachingPolicy(), canMatchReader::decRef); + } else { + ReferenceManager manager = getReferenceManager(scope); + ElasticsearchDirectoryReader acquire = manager.acquire(); + return new Searcher(source, acquire, engineConfig.getSimilarity(), engineConfig.getQueryCache(), + engineConfig.getQueryCachingPolicy(), () -> manager.release(acquire)); } - } - - @Override - public Bits getLiveDocs() { - ensureOpenOrReleased(); - return in.getLiveDocs(); - } - - @Override - public FieldInfos getFieldInfos() { - ensureOpenOrReleased(); - return in.getFieldInfos(); - } - - @Override - public PointValues getPointValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getPointValues(field); - } - - @Override - public Fields getTermVectors(int docID) - throws IOException { - ensureOpenOrReleased(); - return in.getTermVectors(docID); - } - - @Override - public int numDocs() { - return numDocs; - } - - @Override - public int maxDoc() { - return maxDocs; - } - - @Override - public void document(int docID, StoredFieldVisitor visitor) throws IOException { - ensureOpenOrReleased(); - in.document(docID, visitor); - } - - @Override - protected void doClose() throws IOException { - in.close(); - } - - @Override - public CacheHelper getReaderCacheHelper() { - ensureOpenOrReleased(); - return in.getReaderCacheHelper(); - } - - @Override - public CacheHelper getCoreCacheHelper() { - ensureOpenOrReleased(); - return in.getCoreCacheHelper(); - } - - @Override - public Terms terms(String field) throws 
IOException { - ensureOpenOrReleased(); - return in.terms(field); - } - - @Override - public String toString() { - final StringBuilder buffer = new StringBuilder("LazyLeafReader("); - buffer.append(in); - buffer.append(')'); - return buffer.toString(); - } - - @Override - public NumericDocValues getNumericDocValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getNumericDocValues(field); - } - - @Override - public BinaryDocValues getBinaryDocValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getBinaryDocValues(field); - } - - @Override - public SortedDocValues getSortedDocValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getSortedDocValues(field); - } - - @Override - public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getSortedNumericDocValues(field); - } - - @Override - public SortedSetDocValues getSortedSetDocValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getSortedSetDocValues(field); - } - - @Override - public NumericDocValues getNormValues(String field) throws IOException { - ensureOpenOrReleased(); - return in.getNormValues(field); - } - - @Override - public LeafMetaData getMetaData() { - ensureOpenOrReleased(); - return in.getMetaData(); - } - - @Override - public void checkIntegrity() throws IOException { - ensureOpenOrReleased(); - in.checkIntegrity(); - } - - @Override - public LeafReader getDelegate() { - return in; + } else { + return new Searcher(source, reader, engineConfig.getSimilarity(), engineConfig.getQueryCache(), + engineConfig.getQueryCachingPolicy(), () -> closeReader(reader)); } } diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java index c2fb11d294dec..7c5e29b03b6ed 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/FrozenIndices.java @@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.FrozenEngine; @@ -55,14 +54,6 @@ public List> getSettings() { return Arrays.asList(FrozenEngine.INDEX_FROZEN); } - @Override - public void onIndexModule(IndexModule indexModule) { - if (FrozenEngine.INDEX_FROZEN.get(indexModule.getSettings())) { - indexModule.addSearchOperationListener(new FrozenEngine.ReacquireEngineSearcherListener()); - } - super.onIndexModule(indexModule); - } - @Override public List> getActions() { List> actions = new ArrayList<>(); diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java index 468a3846dadf8..8cc3df534a294 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenEngineTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import 
org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; @@ -33,6 +32,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import static org.hamcrest.Matchers.equalTo; @@ -51,22 +51,25 @@ public void testAcquireReleaseReset() throws IOException { listener.reset(); try (FrozenEngine frozenEngine = new FrozenEngine(engine.engineConfig, true)) { assertFalse(frozenEngine.isReaderOpen()); - Engine.Searcher searcher = frozenEngine.acquireSearcher("test"); - assertEquals(config.getShardId(), ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher - .getDirectoryReader()).shardId()); - assertTrue(frozenEngine.isReaderOpen()); - TopDocs search = searcher.search(new MatchAllDocsQuery(), numDocs); - assertEquals(search.scoreDocs.length, numDocs); - assertEquals(1, listener.afterRefresh.get()); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).release(); - assertFalse(frozenEngine.isReaderOpen()); - assertEquals(1, listener.afterRefresh.get()); - expectThrows(AlreadyClosedException.class, () -> searcher.search(new MatchAllDocsQuery(), numDocs)); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).reset(); - assertEquals(2, listener.afterRefresh.get()); - search = searcher.search(new MatchAllDocsQuery(), numDocs); - assertEquals(search.scoreDocs.length, numDocs); - searcher.close(); + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + assertFalse(frozenEngine.isReaderOpen()); + try (Engine.Searcher searcher = reader.acquireSearcher("frozen")) { + assertEquals(config.getShardId(), ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher + .getDirectoryReader()).shardId()); + assertTrue(frozenEngine.isReaderOpen()); + TopDocs search = searcher.search(new MatchAllDocsQuery(), numDocs); + assertEquals(search.scoreDocs.length, numDocs); + assertEquals(1, listener.afterRefresh.get()); + } + assertFalse(frozenEngine.isReaderOpen()); + assertEquals(1, listener.afterRefresh.get()); + + try (Engine.Searcher searcher = reader.acquireSearcher("frozen")) { + assertTrue(frozenEngine.isReaderOpen()); + TopDocs search = searcher.search(new MatchAllDocsQuery(), numDocs); + assertEquals(search.scoreDocs.length, numDocs); + } + } } } } @@ -85,24 +88,31 @@ public void testAcquireReleaseResetTwoSearchers() throws IOException { listener.reset(); try (FrozenEngine frozenEngine = new FrozenEngine(engine.engineConfig, true)) { assertFalse(frozenEngine.isReaderOpen()); - Engine.Searcher searcher1 = frozenEngine.acquireSearcher("test"); - assertTrue(frozenEngine.isReaderOpen()); - TopDocs search = searcher1.search(new MatchAllDocsQuery(), numDocs); - assertEquals(search.scoreDocs.length, numDocs); - assertEquals(1, listener.afterRefresh.get()); - FrozenEngine.unwrapLazyReader(searcher1.getDirectoryReader()).release(); - Engine.Searcher searcher2 = frozenEngine.acquireSearcher("test"); - search = searcher2.search(new MatchAllDocsQuery(), numDocs); - assertEquals(search.scoreDocs.length, numDocs); - assertTrue(frozenEngine.isReaderOpen()); - assertEquals(2, listener.afterRefresh.get()); - expectThrows(AlreadyClosedException.class, () -> 
searcher1.search(new MatchAllDocsQuery(), numDocs)); - FrozenEngine.unwrapLazyReader(searcher1.getDirectoryReader()).reset(); + Engine.SearcherSupplier reader1 = frozenEngine.acquireSearcherSupplier(Function.identity()); + try (Engine.Searcher searcher1 = reader1.acquireSearcher("test")) { + assertTrue(frozenEngine.isReaderOpen()); + TopDocs search = searcher1.search(new MatchAllDocsQuery(), numDocs); + assertEquals(search.scoreDocs.length, numDocs); + assertEquals(1, listener.afterRefresh.get()); + } + assertFalse(frozenEngine.isReaderOpen()); + Engine.SearcherSupplier reader2 = frozenEngine.acquireSearcherSupplier(Function.identity()); + try (Engine.Searcher searcher2 = reader2.acquireSearcher("test")) { + TopDocs search = searcher2.search(new MatchAllDocsQuery(), numDocs); + assertEquals(search.scoreDocs.length, numDocs); + assertTrue(frozenEngine.isReaderOpen()); + assertEquals(2, listener.afterRefresh.get()); + } + assertFalse(frozenEngine.isReaderOpen()); assertEquals(2, listener.afterRefresh.get()); - search = searcher1.search(new MatchAllDocsQuery(), numDocs); - assertEquals(search.scoreDocs.length, numDocs); - searcher1.close(); - searcher2.close(); + reader2.close(); + try (Engine.Searcher searcher1 = reader1.acquireSearcher("test")) { + TopDocs search = searcher1.search(new MatchAllDocsQuery(), numDocs); + assertEquals(search.scoreDocs.length, numDocs); + assertTrue(frozenEngine.isReaderOpen()); + } + reader1.close(); + assertFalse(frozenEngine.isReaderOpen()); } } } @@ -120,21 +130,24 @@ public void testSegmentStats() throws IOException { engine.flushAndClose(); listener.reset(); try (FrozenEngine frozenEngine = new FrozenEngine(engine.engineConfig, true)) { - Engine.Searcher searcher = frozenEngine.acquireSearcher("test"); - SegmentsStats segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); - assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).release(); - assertEquals(1, listener.afterRefresh.get()); - segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); - assertEquals(0, segmentsStats.getCount()); - segmentsStats = frozenEngine.segmentsStats(randomBoolean(), true); - assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); - assertEquals(1, listener.afterRefresh.get()); - assertFalse(frozenEngine.isReaderOpen()); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).reset(); - segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); - assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); - searcher.close(); + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + SegmentsStats segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); + assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); + assertEquals(1, listener.afterRefresh.get()); + } + segmentsStats = frozenEngine.segmentsStats(randomBoolean(), false); + assertEquals(0, segmentsStats.getCount()); + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + segmentsStats = frozenEngine.segmentsStats(randomBoolean(), true); + assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); + assertEquals(2, listener.afterRefresh.get()); + } + 
assertFalse(frozenEngine.isReaderOpen()); + segmentsStats = frozenEngine.segmentsStats(randomBoolean(), true); + assertEquals(frozenEngine.segments(randomBoolean()).size(), segmentsStats.getCount()); + } } } } @@ -167,16 +180,18 @@ public void testCircuitBreakerAccounting() throws IOException { assertEquals(0, breaker.getUsed()); listener.reset(); try (FrozenEngine frozenEngine = new FrozenEngine(config, true)) { - Engine.Searcher searcher = frozenEngine.acquireSearcher("test"); - assertEquals(expectedUse, breaker.getUsed()); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).release(); - assertEquals(1, listener.afterRefresh.get()); - assertEquals(0, breaker.getUsed()); - assertFalse(frozenEngine.isReaderOpen()); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).reset(); - assertEquals(expectedUse, breaker.getUsed()); - searcher.close(); - assertEquals(0, breaker.getUsed()); + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + assertEquals(expectedUse, breaker.getUsed()); + } + assertEquals(1, listener.afterRefresh.get()); + assertEquals(0, breaker.getUsed()); + assertFalse(frozenEngine.isReaderOpen()); + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + assertEquals(expectedUse, breaker.getUsed()); + } + assertEquals(0, breaker.getUsed()); + } } } } @@ -219,18 +234,17 @@ public void testSearchConcurrently() throws IOException, InterruptedException { CountDownLatch latch = new CountDownLatch(numThreads); for (int i = 0; i < numThreads; i++) { threads[i] = new Thread(() -> { - try (Engine.Searcher searcher = frozenEngine.acquireSearcher("test")) { + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { barrier.await(); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).release(); for (int j = 0; j < numIters; j++) { - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).reset(); - assertTrue(frozenEngine.isReaderOpen()); - TopDocs search = searcher.search(new MatchAllDocsQuery(), Math.min(10, numDocsAdded)); - assertEquals(search.scoreDocs.length, Math.min(10, numDocsAdded)); - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).release(); + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + assertTrue(frozenEngine.isReaderOpen()); + TopDocs search = searcher.search(new MatchAllDocsQuery(), Math.min(10, numDocsAdded)); + assertEquals(search.scoreDocs.length, Math.min(10, numDocsAdded)); + } } if (randomBoolean()) { - FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader()).reset(); + reader.acquireSearcher("test").close(); } } catch (Exception e) { throw new AssertionError(e); @@ -270,12 +284,6 @@ private static void checkOverrideMethods(Class clazz) throws NoSuchMethodExce } } - // here we make sure we catch any change to their super classes FilterLeafReader / FilterDirectoryReader - public void testOverrideMethods() throws Exception { - checkOverrideMethods(FrozenEngine.LazyDirectoryReader.class); - checkOverrideMethods(FrozenEngine.LazyLeafReader.class); - } - private class CountingRefreshListener implements ReferenceManager.RefreshListener { final AtomicInteger afterRefresh = new AtomicInteger(0); @@ -309,25 +317,27 @@ public void testCanMatch() throws IOException { engine.flushAndClose(); listener.reset(); try (FrozenEngine frozenEngine = new FrozenEngine(engine.engineConfig, true)) { - DirectoryReader reader; - try (Engine.Searcher 
searcher = frozenEngine.acquireSearcher("can_match")) { - assertNotNull(ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher.getDirectoryReader())); - assertEquals(config.getShardId(), ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher - .getDirectoryReader()).shardId()); - reader = searcher.getDirectoryReader(); - assertNotEquals(reader, Matchers.instanceOf(FrozenEngine.LazyDirectoryReader.class)); - assertEquals(0, listener.afterRefresh.get()); - DirectoryReader unwrap = FilterDirectoryReader.unwrap(searcher.getDirectoryReader()); - assertThat(unwrap, Matchers.instanceOf(RewriteCachingDirectoryReader.class)); - assertNotNull(ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher.getDirectoryReader())); + DirectoryReader dirReader; + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + try (Engine.Searcher searcher = reader.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE)) { + dirReader = searcher.getDirectoryReader(); + assertNotNull(ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher.getDirectoryReader())); + assertEquals(config.getShardId(), ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher + .getDirectoryReader()).shardId()); + assertEquals(0, listener.afterRefresh.get()); + DirectoryReader unwrap = FilterDirectoryReader.unwrap(searcher.getDirectoryReader()); + assertThat(unwrap, Matchers.instanceOf(RewriteCachingDirectoryReader.class)); + assertNotNull(ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(searcher.getDirectoryReader())); + } } - try (Engine.Searcher searcher = frozenEngine.acquireSearcher("can_match")) { - assertSame(reader, searcher.getDirectoryReader()); - assertNotEquals(reader, Matchers.instanceOf(FrozenEngine.LazyDirectoryReader.class)); - assertEquals(0, listener.afterRefresh.get()); - DirectoryReader unwrap = FilterDirectoryReader.unwrap(searcher.getDirectoryReader()); - assertThat(unwrap, Matchers.instanceOf(RewriteCachingDirectoryReader.class)); + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + try (Engine.Searcher searcher = reader.acquireSearcher(Engine.CAN_MATCH_SEARCH_SOURCE)) { + assertSame(dirReader, searcher.getDirectoryReader()); + assertEquals(0, listener.afterRefresh.get()); + DirectoryReader unwrap = FilterDirectoryReader.unwrap(searcher.getDirectoryReader()); + assertThat(unwrap, Matchers.instanceOf(RewriteCachingDirectoryReader.class)); + } } } } @@ -349,14 +359,18 @@ public void testSearchers() throws Exception { // See TransportVerifyShardBeforeCloseAction#executeShardOperation engine.flush(true, true); engine.refresh("test"); - try (Engine.Searcher searcher = engine.acquireSearcher("test")) { - totalDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs.length; + try (Engine.SearcherSupplier reader = engine.acquireSearcherSupplier(Function.identity())) { + try (Engine.Searcher searcher = reader.acquireSearcher("test")) { + totalDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs.length; + } } } try (FrozenEngine frozenEngine = new FrozenEngine(config, true)) { - try (Engine.Searcher searcher = frozenEngine.acquireSearcher("test")) { - TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); - assertThat(topDocs.scoreDocs.length, equalTo(totalDocs)); + try (Engine.SearcherSupplier reader = frozenEngine.acquireSearcherSupplier(Function.identity())) { + try (Engine.Searcher searcher = 
reader.acquireSearcher("test")) { + TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE); + assertThat(topDocs.scoreDocs.length, equalTo(totalDocs)); + } } } } diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 27cd14576f8af..52349a1640a70 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -11,6 +11,9 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -41,6 +44,9 @@ import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeAction; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeRequest; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeResponse; import org.elasticsearch.xpack.frozen.FrozenIndices; import org.hamcrest.Matchers; @@ -62,10 +68,17 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(FrozenIndices.class); + return pluginList(FrozenIndices.class, XPackPlugin.class); } - public void testCloseFreezeAndOpen() { + String openReaders(TimeValue keepAlive, String... indices) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest( + indices, IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED, keepAlive, null, null); + final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + return response.getSearchContextId(); + } + + public void testCloseFreezeAndOpen() throws Exception { createIndex("index", Settings.builder().put("index.number_of_shards", 2).build()); client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); @@ -86,9 +99,7 @@ public void testCloseFreezeAndOpen() { assertEquals(useDFS ? 
3 : 2, shard.refreshStats().getTotal()); assertFalse(((FrozenEngine)engine).isReaderOpen()); assertTrue(indexService.getIndexSettings().isSearchThrottled()); - try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - assertNotNull(FrozenEngine.unwrapLazyReader(searcher.getDirectoryReader())); - } + // now scroll SearchResponse searchResponse = client().prepareSearch().setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) .setScroll(TimeValue.timeValueMinutes(1)).setSize(1).get(); @@ -100,13 +111,39 @@ public void testCloseFreezeAndOpen() { for (int i = 0; i < 2; i++) { shard = indexService.getShard(i); engine = IndexShardTestCase.getEngine(shard); - assertFalse(((FrozenEngine) engine).isReaderOpen()); + // scrolls keep the reader open + assertTrue(((FrozenEngine) engine).isReaderOpen()); } searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); } while (searchResponse.getHits().getHits().length > 0); + client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + + String readerId = openReaders(TimeValue.timeValueMinutes(1), "index"); + try { + // now readerId + for (int from = 0; from < 3; from++) { + searchResponse = client().prepareSearch() + .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) + .setSearchContext(readerId, TimeValue.timeValueMinutes(1)) + .setSize(1) + .setFrom(from) + .get(); + assertHitCount(searchResponse, 3); + assertEquals(1, searchResponse.getHits().getHits().length); + SearchService searchService = getInstanceFromNode(SearchService.class); + assertThat(searchService.getActiveContexts(), Matchers.greaterThanOrEqualTo(1)); + for (int i = 0; i < 2; i++) { + shard = indexService.getShard(i); + engine = IndexShardTestCase.getEngine(shard); + assertFalse(((FrozenEngine) engine).isReaderOpen()); + } + } + } finally { + client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(searchResponse.pointInTimeId())).get(); + } } - public void testSearchAndGetAPIsAreThrottled() throws InterruptedException, IOException { + public void testSearchAndGetAPIsAreThrottled() throws IOException { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").field("term_vector", "with_positions_offsets_payloads") .endObject().endObject() diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java index 6867db69b55b6..050fce8b93161 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/RBACEngine.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.get.MultiGetAction; import org.elasticsearch.action.index.IndexAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; import org.elasticsearch.action.search.ClearScrollAction; import org.elasticsearch.action.search.MultiSearchAction; import org.elasticsearch.action.search.SearchScrollAction; @@ -284,6 +285,8 @@ public void authorizeIndexAction(RequestInfo requestInfo, AuthorizationInfo auth // the same as the user that submitted the original request so no additional checks are needed here. 
listener.onResponse(new IndexAuthorizationResult(true, IndicesAccessControl.ALLOW_NO_INDICES)); } + } else if (action.equals(ClosePointInTimeAction.NAME)) { + listener.onResponse(new IndexAuthorizationResult(true, IndicesAccessControl.ALLOW_NO_INDICES)); } else { assert false : "only scroll and async-search related requests are known indices api that don't " + "support retrieving the indices they relate to"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java index 0d9e2d55ceb86..d290382232250 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListener.java @@ -10,9 +10,10 @@ import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -29,7 +30,7 @@ /** * A {@link SearchOperationListener} that is used to provide authorization for scroll requests. * - * In order to identify the user associated with a scroll request, we replace the {@link ScrollContext} + * In order to identify the user associated with a scroll request, we replace the {@link ReaderContext} * on creation with a custom implementation that holds the {@link Authentication} object. 
When * this context is accessed again in {@link SearchOperationListener#onPreQueryPhase(SearchContext)} * the ScrollContext is inspected for the authentication, which is compared to the currently @@ -51,37 +52,37 @@ public SecuritySearchOperationListener(SecurityContext securityContext, XPackLic * Adds the {@link Authentication} to the {@link ScrollContext} */ @Override - public void onNewScrollContext(SearchContext searchContext) { + public void onNewScrollContext(ReaderContext readerContext) { if (licenseState.isSecurityEnabled()) { - searchContext.scrollContext().putInContext(AuthenticationField.AUTHENTICATION_KEY, securityContext.getAuthentication()); + readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, securityContext.getAuthentication()); // store the DLS and FLS permissions of the initial search request that created the scroll // this is then used to assert the DLS/FLS permission for the scroll search action IndicesAccessControl indicesAccessControl = securityContext.getThreadContext().getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); assert indicesAccessControl != null : "thread context does not contain index access control"; - searchContext.scrollContext().putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); + readerContext.putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); } } /** - * Checks for the {@link ScrollContext} if it exists and compares the {@link Authentication} + * Checks for the {@link ReaderContext} if it exists and compares the {@link Authentication} * object from the scroll context with the current authentication context */ @Override - public void validateSearchContext(SearchContext searchContext, TransportRequest request) { + public void validateSearchContext(ReaderContext readerContext, TransportRequest request) { if (licenseState.isSecurityEnabled()) { - if (searchContext.scrollContext() != null) { - final Authentication originalAuth = searchContext.scrollContext().getFromContext(AuthenticationField.AUTHENTICATION_KEY); + if (readerContext.scrollContext() != null) { + final Authentication originalAuth = readerContext.getFromContext(AuthenticationField.AUTHENTICATION_KEY); final Authentication current = securityContext.getAuthentication(); final ThreadContext threadContext = securityContext.getThreadContext(); final String action = threadContext.getTransient(ORIGINATING_ACTION_KEY); - ensureAuthenticatedUserIsSame(originalAuth, current, auditTrailService, searchContext.id(), action, request, + ensureAuthenticatedUserIsSame(originalAuth, current, auditTrailService, readerContext.id(), action, request, AuditUtil.extractRequestId(threadContext), threadContext.getTransient(AUTHORIZATION_INFO_KEY)); // piggyback on context validation to assert the DLS/FLS permissions on the thread context of the scroll search handler if (null == securityContext.getThreadContext().getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY)) { // fill in the DLS and FLS permissions for the scroll search action from the scroll context IndicesAccessControl scrollIndicesAccessControl = - searchContext.scrollContext().getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + readerContext.getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); assert scrollIndicesAccessControl != null : "scroll does not contain index access control"; securityContext.getThreadContext().putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, scrollIndicesAccessControl); @@ 
-92,22 +93,22 @@ public void validateSearchContext(SearchContext searchContext, TransportRequest @Override public void onPreFetchPhase(SearchContext searchContext) { - ensureIndicesAccessControlForScrollThreadContext(searchContext); + ensureIndicesAccessControlForScrollThreadContext(searchContext.readerContext()); } @Override public void onPreQueryPhase(SearchContext searchContext) { - ensureIndicesAccessControlForScrollThreadContext(searchContext); + ensureIndicesAccessControlForScrollThreadContext(searchContext.readerContext()); } - void ensureIndicesAccessControlForScrollThreadContext(SearchContext searchContext) { - if (licenseState.isSecurityEnabled() && searchContext.scrollContext() != null) { + void ensureIndicesAccessControlForScrollThreadContext(ReaderContext readerContext) { + if (licenseState.isSecurityEnabled() && readerContext.scrollContext() != null) { IndicesAccessControl scrollIndicesAccessControl = - searchContext.scrollContext().getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + readerContext.getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); IndicesAccessControl threadIndicesAccessControl = securityContext.getThreadContext().getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); if (scrollIndicesAccessControl != threadIndicesAccessControl) { - throw new ElasticsearchSecurityException("[" + searchContext.id() + "] expected scroll indices access control [" + + throw new ElasticsearchSecurityException("[" + readerContext.id() + "] expected scroll indices access control [" + scrollIndicesAccessControl.toString() + "] but found [" + threadIndicesAccessControl.toString() + "] in thread " + "context"); } @@ -115,13 +116,13 @@ void ensureIndicesAccessControlForScrollThreadContext(SearchContext searchContex } /** - * Compares the {@link Authentication} that was stored in the {@link ScrollContext} with the + * Compares the {@link Authentication} that was stored in the {@link ReaderContext} with the * current authentication. We cannot guarantee that all of the details of the authentication will * be the same. Some things that could differ include the roles, the name of the authenticating * (or lookup) realm. To work around this we compare the username and the originating realm type. */ static void ensureAuthenticatedUserIsSame(Authentication original, Authentication current, AuditTrailService auditTrailService, - SearchContextId id, String action, TransportRequest request, String requestId, + ShardSearchContextId id, String action, TransportRequest request, String requestId, AuthorizationInfo authorizationInfo) { // this is really a best effort attempt since we cannot guarantee principal uniqueness // and realm names can change between nodes. 
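Reviewer note: the javadoc above states the matching rule in prose; condensed into code it looks roughly like the sketch below. This is a simplification for illustration only. The real `ensureAuthenticatedUserIsSame` also handles run-as (lookup) realms and reports mismatches to the audit trail, and the helper name `sameUser` is invented here.

```java
import org.elasticsearch.xpack.core.security.authc.Authentication;

// Simplified sketch of the rule described above: compare the effective
// username and the *type* of the authenticating realm, since realm names
// can differ between nodes. Not the actual implementation.
static boolean sameUser(Authentication original, Authentication current) {
    boolean samePrincipal = original.getUser().principal()
        .equals(current.getUser().principal());
    boolean sameRealmType = original.getAuthenticatedBy().getType()
        .equals(current.getAuthenticatedBy().getType());
    return samePrincipal && sameRealmType;
}
```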
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java index 72d09aec8a140..3c48563ac327a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/DocumentLevelSecurityTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -63,6 +65,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.integration.FieldLevelSecurityTests.openSearchContext; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -759,6 +762,46 @@ public void testScroll() throws Exception { } } + public void testReaderId() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) + .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + final int numVisible = scaledRandomIntBetween(2, 10); + final int numInvisible = scaledRandomIntBetween(2, 10); + int id = 1; + for (int i = 0; i < numVisible; i++) { + client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field1", "value1").get(); + } + + for (int i = 0; i < numInvisible; i++) { + client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field2", "value2").get(); + client().prepareIndex("test").setId(String.valueOf(id++)).setSource("field3", "value3").get(); + } + refresh(); + + String readerId = openSearchContext("user1", TimeValue.timeValueMinutes(1), "test"); + SearchResponse response = null; + try { + for (int from = 0; from < numVisible; from++) { + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch() + .setSize(1) + .setFrom(from) + .setSearchContext(readerId, TimeValue.timeValueMinutes(1)) + .setQuery(termQuery("field1", "value1")) + .get(); + assertNoFailures(response); + assertThat(response.getHits().getTotalHits().value, is((long) numVisible)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + } + } finally { + client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(response.pointInTimeId())).actionGet(); + } + } + public void testRequestCache() throws Exception { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)) 
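Both security test suites in this patch drive the new point-in-time actions the same way; stripped of the test scaffolding, the round trip looks like the sketch below. The index name and keep-alive are arbitrary example values, and `client()` stands in for whatever `Client` instance is available; the calls themselves are the ones the tests above use.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction;
import org.elasticsearch.xpack.core.search.action.ClosePointInTimeRequest;
import org.elasticsearch.xpack.core.search.action.OpenPointInTimeAction;
import org.elasticsearch.xpack.core.search.action.OpenPointInTimeRequest;

// Open a point in time, search against it with from/size paging, close it.
OpenPointInTimeRequest open = new OpenPointInTimeRequest(
    new String[] { "test" }, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS,
    TimeValue.timeValueMinutes(1), null, null);
String pitId = client().execute(OpenPointInTimeAction.INSTANCE, open)
    .actionGet().getSearchContextId();
try {
    SearchResponse response = client().prepareSearch()
        .setSearchContext(pitId, TimeValue.timeValueMinutes(1)) // search the opened reader
        .setSize(1)
        .get();
} finally {
    // always close the reader when done with it
    client().execute(ClosePointInTimeAction.INSTANCE,
        new ClosePointInTimeRequest(pitId)).actionGet();
}
```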
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java index 744ad1d371dd8..1ee30c51a2eeb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/FieldLevelSecurityTests.java @@ -11,6 +11,8 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeAction; +import org.elasticsearch.xpack.core.search.action.ClosePointInTimeRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -39,6 +41,9 @@ import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeAction; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeRequest; +import org.elasticsearch.xpack.core.search.action.OpenPointInTimeResponse; import org.elasticsearch.xpack.security.LocalStateSecurity; import java.util.Arrays; @@ -673,7 +678,8 @@ public void testMSearchApi() throws Exception { public void testScroll() throws Exception { assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) + .setSettings(Settings.builder() + .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text") ); @@ -723,6 +729,52 @@ public void testScroll() throws Exception { } } + static String openSearchContext(String userName, TimeValue keepAlive, String... 
indices) { + OpenPointInTimeRequest request = new OpenPointInTimeRequest( + indices, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS, keepAlive, null, null); + final OpenPointInTimeResponse response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(userName, USERS_PASSWD))) + .execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); + return response.getSearchContextId(); + } + + public void testReaderId() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) + .setMapping("field1", "type=text", "field2", "type=text", "field3", "type=text") + ); + + final int numDocs = scaledRandomIntBetween(2, 10); + for (int i = 0; i < numDocs; i++) { + client().prepareIndex("test").setId(String.valueOf(i)) + .setSource("field1", "value1", "field2", "value2", "field3", "value3") + .get(); + } + refresh("test"); + + String readerId = openSearchContext("user1", TimeValue.timeValueMinutes(1), "test"); + SearchResponse response = null; + try { + for (int from = 0; from < numDocs; from++) { + response = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))) + .prepareSearch() + .setSearchContext(readerId, TimeValue.timeValueMinutes(1L)) + .setSize(1) + .setFrom(from) + .setQuery(constantScoreQuery(termQuery("field1", "value1"))) + .setFetchSource(true) + .get(); + assertThat(response.getHits().getTotalHits().value, is((long) numDocs)); + assertThat(response.getHits().getHits().length, is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().size(), is(1)); + assertThat(response.getHits().getAt(0).getSourceAsMap().get("field1"), is("value1")); + } + } finally { + client().execute(ClosePointInTimeAction.INSTANCE, new ClosePointInTimeRequest(readerId)).actionGet(); + } + } + public void testQueryCache() throws Exception { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java index 31cfc676fd152..729c969f9c0ab 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java @@ -10,16 +10,18 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState.Feature; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.LegacyReaderContext; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContextId; -import org.elasticsearch.test.ESTestCase; 
-import org.elasticsearch.test.TestSearchContext; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequest.Empty; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -32,6 +34,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; +import org.junit.Before; import org.mockito.Mockito; import java.util.Collections; @@ -51,145 +54,163 @@ import static org.mockito.Mockito.verifyZeroInteractions; import static org.mockito.Mockito.when; -public class SecuritySearchOperationListenerTests extends ESTestCase { +public class SecuritySearchOperationListenerTests extends ESSingleNodeTestCase { + private IndexService indexService; + private IndexShard shard; + + @Before + public void setupShard() { + indexService = createIndex("index"); + shard = indexService.getShard(0); + } public void testUnlicensed() { - XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(false); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); - AuditTrailService auditTrailService = mock(AuditTrailService.class); - SearchContext searchContext = mock(SearchContext.class); - when(searchContext.scrollContext()).thenReturn(new ScrollContext()); + final ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMinutes(between(1, 10)))); + try (LegacyReaderContext readerContext = + new LegacyReaderContext(0L, indexService, shard, shard.acquireSearcherSupplier(), shardSearchRequest, Long.MAX_VALUE)) { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(false); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + AuditTrailService auditTrailService = mock(AuditTrailService.class); + SearchContext searchContext = mock(SearchContext.class); - SecuritySearchOperationListener listener = new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); - listener.onNewScrollContext(searchContext); - listener.validateSearchContext(searchContext, Empty.INSTANCE); - verify(licenseState, times(2)).isSecurityEnabled(); - verifyZeroInteractions(auditTrailService, searchContext); + SecuritySearchOperationListener listener = + new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); + listener.onNewScrollContext(readerContext); + listener.validateSearchContext(readerContext, Empty.INSTANCE); + verify(licenseState, times(2)).isSecurityEnabled(); + verifyZeroInteractions(auditTrailService, searchContext); + } } public void testOnNewContextSetsAuthentication() throws Exception { - TestScrollSearchContext testSearchContext = new TestScrollSearchContext(); - testSearchContext.scrollContext(new ScrollContext()); - final Scroll scroll = new Scroll(TimeValue.timeValueSeconds(2L)); - testSearchContext.scrollContext().scroll = scroll; - XPackLicenseState licenseState = mock(XPackLicenseState.class); - 
when(licenseState.isSecurityEnabled()).thenReturn(true); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); - AuditTrailService auditTrailService = mock(AuditTrailService.class); - Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); - authentication.writeToContext(threadContext); - IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class); - threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); - - SecuritySearchOperationListener listener = new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); - listener.onNewScrollContext(testSearchContext); + final ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMinutes(between(1, 10)))); + try (LegacyReaderContext readerContext = + new LegacyReaderContext(0L, indexService, shard, shard.acquireSearcherSupplier(), shardSearchRequest, Long.MAX_VALUE)) { + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + AuditTrailService auditTrailService = mock(AuditTrailService.class); + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + authentication.writeToContext(threadContext); + IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class); + threadContext.putTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); - Authentication contextAuth = testSearchContext.scrollContext().getFromContext(AuthenticationField.AUTHENTICATION_KEY); - assertEquals(authentication, contextAuth); - assertEquals(scroll, testSearchContext.scrollContext().scroll); + SecuritySearchOperationListener listener = + new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); + listener.onNewScrollContext(readerContext); - assertThat(testSearchContext.scrollContext().getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), - is(indicesAccessControl)); + Authentication contextAuth = readerContext.getFromContext(AuthenticationField.AUTHENTICATION_KEY); + assertEquals(authentication, contextAuth); + assertThat(readerContext.getFromContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); - verify(licenseState).isSecurityEnabled(); - verifyZeroInteractions(auditTrailService); + verify(licenseState).isSecurityEnabled(); + verifyZeroInteractions(auditTrailService); + } } public void testValidateSearchContext() throws Exception { - TestScrollSearchContext testSearchContext = new TestScrollSearchContext(); - testSearchContext.scrollContext(new ScrollContext()); - testSearchContext.scrollContext().putInContext(AuthenticationField.AUTHENTICATION_KEY, + final ShardSearchRequest shardSearchRequest = mock(ShardSearchRequest.class); + when(shardSearchRequest.scroll()).thenReturn(new Scroll(TimeValue.timeValueMinutes(between(1, 10)))); + try (LegacyReaderContext readerContext = + new LegacyReaderContext(0L, indexService, shard, shard.acquireSearcherSupplier(), shardSearchRequest, Long.MAX_VALUE)) { + 
readerContext.putInContext(AuthenticationField.AUTHENTICATION_KEY, new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null)); - final IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class); - testSearchContext.scrollContext().putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); - testSearchContext.scrollContext().scroll = new Scroll(TimeValue.timeValueSeconds(2L)); - XPackLicenseState licenseState = mock(XPackLicenseState.class); - when(licenseState.isSecurityEnabled()).thenReturn(true); - when(licenseState.checkFeature(Feature.SECURITY_AUDITING)).thenReturn(true); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); - AuditTrail auditTrail = mock(AuditTrail.class); - AuditTrailService auditTrailService = new AuditTrailService(Collections.singletonList(auditTrail), licenseState); + final IndicesAccessControl indicesAccessControl = mock(IndicesAccessControl.class); + readerContext.putInContext(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); + XPackLicenseState licenseState = mock(XPackLicenseState.class); + when(licenseState.isSecurityEnabled()).thenReturn(true); + when(licenseState.checkFeature(Feature.SECURITY_AUDITING)).thenReturn(true); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final SecurityContext securityContext = new SecurityContext(Settings.EMPTY, threadContext); + AuditTrail auditTrail = mock(AuditTrail.class); + AuditTrailService auditTrailService = + new AuditTrailService(Collections.singletonList(auditTrail), licenseState); - SecuritySearchOperationListener listener = new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); - try (StoredContext ignore = threadContext.newStoredContext(false)) { - Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); - authentication.writeToContext(threadContext); - listener.validateSearchContext(testSearchContext, Empty.INSTANCE); - assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); - verify(licenseState).isSecurityEnabled(); - verifyZeroInteractions(auditTrail); - } + SecuritySearchOperationListener listener = + new SecuritySearchOperationListener(securityContext, licenseState, auditTrailService); + try (StoredContext ignore = threadContext.newStoredContext(false)) { + Authentication authentication = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); + authentication.writeToContext(threadContext); + listener.validateSearchContext(readerContext, Empty.INSTANCE); + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); + verify(licenseState).isSecurityEnabled(); + verifyZeroInteractions(auditTrail); + } - try (StoredContext ignore = threadContext.newStoredContext(false)) { - final String nodeName = randomAlphaOfLengthBetween(1, 8); - final String realmName = randomAlphaOfLengthBetween(1, 16); - Authentication authentication = new Authentication(new User("test", "role"), new RealmRef(realmName, "file", nodeName), null); - authentication.writeToContext(threadContext); - listener.validateSearchContext(testSearchContext, Empty.INSTANCE); - assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), 
is(indicesAccessControl)); - verify(licenseState, times(2)).isSecurityEnabled(); - verifyZeroInteractions(auditTrail); - } + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomAlphaOfLengthBetween(1, 8); + final String realmName = randomAlphaOfLengthBetween(1, 16); + Authentication authentication = + new Authentication(new User("test", "role"), new RealmRef(realmName, "file", nodeName), null); + authentication.writeToContext(threadContext); + listener.validateSearchContext(readerContext, Empty.INSTANCE); + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); + verify(licenseState, times(2)).isSecurityEnabled(); + verifyZeroInteractions(auditTrail); + } - try (StoredContext ignore = threadContext.newStoredContext(false)) { - final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); - final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); - final String type = randomAlphaOfLengthBetween(5, 16); - Authentication authentication = new Authentication(new User("test", "role"), new RealmRef(realmName, type, nodeName), null); - authentication.writeToContext(threadContext); - threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); - threadContext.putTransient(AUTHORIZATION_INFO_KEY, - (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getUser().roles())); - final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); - SearchContextMissingException expected = - expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); - assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue()); - assertEquals(testSearchContext.id(), expected.contextId()); - verify(licenseState, Mockito.atLeast(3)).isSecurityEnabled(); - verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), - authzInfoRoles(authentication.getUser().roles())); - } + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? 
"realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + Authentication authentication = + new Authentication(new User("test", "role"), new RealmRef(realmName, type, nodeName), null); + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + threadContext.putTransient(AUTHORIZATION_INFO_KEY, + (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getUser().roles())); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + SearchContextMissingException expected = expectThrows(SearchContextMissingException.class, + () -> listener.validateSearchContext(readerContext, request)); + assertEquals(readerContext.id(), expected.contextId()); + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue()); + verify(licenseState, Mockito.atLeast(3)).isSecurityEnabled(); + verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), + authzInfoRoles(authentication.getUser().roles())); + } - // another user running as the original user - try (StoredContext ignore = threadContext.newStoredContext(false)) { - final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); - final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); - final String type = randomAlphaOfLengthBetween(5, 16); - User user = new User(new User("test", "role"), new User("authenticated", "runas")); - Authentication authentication = new Authentication(user, new RealmRef(realmName, type, nodeName), + // another user running as the original user + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + User user = new User(new User("test", "role"), new User("authenticated", "runas")); + Authentication authentication = new Authentication(user, new RealmRef(realmName, type, nodeName), new RealmRef(randomAlphaOfLengthBetween(1, 16), "file", nodeName)); - authentication.writeToContext(threadContext); - threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); - final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); - listener.validateSearchContext(testSearchContext, request); - assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); - verify(licenseState, Mockito.atLeast(4)).isSecurityEnabled(); - verifyNoMoreInteractions(auditTrail); - } + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + listener.validateSearchContext(readerContext, request); + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), is(indicesAccessControl)); + verify(licenseState, Mockito.atLeast(4)).isSecurityEnabled(); + verifyNoMoreInteractions(auditTrail); + } - // the user that authenticated for the run as request - try (StoredContext ignore = threadContext.newStoredContext(false)) { - final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); - final String realmName = randomBoolean() ? 
"realm" : randomAlphaOfLengthBetween(1, 16); - final String type = randomAlphaOfLengthBetween(5, 16); - Authentication authentication = + // the user that authenticated for the run as request + try (StoredContext ignore = threadContext.newStoredContext(false)) { + final String nodeName = randomBoolean() ? "node" : randomAlphaOfLengthBetween(1, 8); + final String realmName = randomBoolean() ? "realm" : randomAlphaOfLengthBetween(1, 16); + final String type = randomAlphaOfLengthBetween(5, 16); + Authentication authentication = new Authentication(new User("authenticated", "runas"), new RealmRef(realmName, type, nodeName), null); - authentication.writeToContext(threadContext); - threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); - threadContext.putTransient(AUTHORIZATION_INFO_KEY, - (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getUser().roles())); - final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); - SearchContextMissingException expected = - expectThrows(SearchContextMissingException.class, () -> listener.validateSearchContext(testSearchContext, request)); - assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue()); - assertEquals(testSearchContext.id(), expected.contextId()); - verify(licenseState, Mockito.atLeast(5)).isSecurityEnabled(); - verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), - authzInfoRoles(authentication.getUser().roles())); + authentication.writeToContext(threadContext); + threadContext.putTransient(ORIGINATING_ACTION_KEY, "action"); + threadContext.putTransient(AUTHORIZATION_INFO_KEY, + (AuthorizationInfo) () -> Collections.singletonMap(PRINCIPAL_ROLES_FIELD_NAME, authentication.getUser().roles())); + final InternalScrollSearchRequest request = new InternalScrollSearchRequest(); + SearchContextMissingException expected = expectThrows(SearchContextMissingException.class, + () -> listener.validateSearchContext(readerContext, request)); + assertEquals(readerContext.id(), expected.contextId()); + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), nullValue()); + verify(licenseState, Mockito.atLeast(5)).isSecurityEnabled(); + verify(auditTrail).accessDenied(eq(null), eq(authentication), eq("action"), eq(request), + authzInfoRoles(authentication.getUser().roles())); + } } } @@ -197,7 +218,7 @@ public void testEnsuredAuthenticatedUserIsSame() { Authentication original = new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); Authentication current = randomBoolean() ? 
original : new Authentication(new User("test", "role"), new RealmRef("realm", "file", "node"), null); - SearchContextId contextId = new SearchContextId(UUIDs.randomBase64UUID(), randomLong()); + ShardSearchContextId contextId = new ShardSearchContextId(UUIDs.randomBase64UUID(), randomLong()); final String action = randomAlphaOfLength(4); TransportRequest request = Empty.INSTANCE; XPackLicenseState licenseState = mock(XPackLicenseState.class); @@ -266,24 +287,4 @@ public void testEnsuredAuthenticatedUserIsSame() { verify(auditTrail).accessDenied(eq(auditId), eq(runAsDiffType), eq(action), eq(request), authzInfoRoles(original.getUser().roles())); } - - static class TestScrollSearchContext extends TestSearchContext { - - private ScrollContext scrollContext; - - TestScrollSearchContext() { - super(null); - } - - @Override - public ScrollContext scrollContext() { - return scrollContext; - } - - @Override - public SearchContext scrollContext(ScrollContext scrollContext) { - this.scrollContext = scrollContext; - return this; - } - } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/close_point_in_time.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/close_point_in_time.json new file mode 100644 index 0000000000000..541d299174ccb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/close_point_in_time.json @@ -0,0 +1,23 @@ +{ + "close_point_in_time":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time.html", + "description":"Close a point in time" + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_pit", + "methods":[ + "DELETE" + ] + } + ] + }, + "params":{}, + "body":{ + "description": "a point-in-time id to close" + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/open_point_in_time.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/open_point_in_time.json new file mode 100644 index 0000000000000..343ca6a852f2a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/open_point_in_time.json @@ -0,0 +1,61 @@ +{ + "open_point_in_time":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/point-in-time.html", + "description":"Open a point in time that can be used in subsequent searches" + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_pit", + "methods":[ + "POST" + ] + }, + { + "path":"/{index}/_pit", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to open point in time; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"string", + "description":"Specific routing value" + }, + "ignore_unavailable":{ + "type":"boolean", + "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "expand_wildcards":{ + "type":"enum", + "options":[ + "open", + "closed", + "hidden", + "none", + "all" + ], + "default":"open", + "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
+ }, + "keep_alive": { + "type": "string", + "description": "Specific the time to live for the point in time" + } + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/search/point_in_time.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/search/point_in_time.yml new file mode 100644 index 0000000000000..52e5708179234 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/search/point_in_time.yml @@ -0,0 +1,173 @@ +setup: + - do: + indices.create: + index: test + - do: + index: + index: test + id: 1 + body: { id: 1, foo: bar, age: 18 } + + - do: + index: + index: test + id: 42 + body: { id: 42, foo: bar, age: 18 } + + - do: + index: + index: test + id: 172 + body: { id: 172, foo: bar, age: 24 } + + - do: + indices.create: + index: test2 + + - do: + index: + index: test2 + id: 45 + body: { id: 45, foo: bar, age: 19 } + + - do: + indices.refresh: + index: "test*" + +--- +"basic": + - skip: + version: " - 7.99.99" + reason: "point in time is introduced in 8.0" + - do: + open_point_in_time: + index: test + keep_alive: 5m + - set: {id: point_in_time_id} + + - do: + search: + rest_total_hits_as_int: true + body: + size: 1 + query: + match: + foo: bar + sort: [{ age: desc }, { id: desc }] + pit: + id: "$point_in_time_id" + keep_alive: 1m + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._index: test } + - match: {hits.hits.0._id: "172" } + - match: {hits.hits.0.sort: [24, 172] } + + - do: + index: + index: test + id: 100 + body: { id: 100, foo: bar, age: 23 } + - do: + indices.refresh: + index: test + + # search with a point in time + - do: + search: + rest_total_hits_as_int: true + body: + size: 1 + query: + match: + foo: bar + sort: [{ age: desc }, { id: desc }] + search_after: [24, 172] + pit: + id: "$point_in_time_id" + keep_alive: 1m + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._index: test } + - match: {hits.hits.0._id: "42" } + - match: {hits.hits.0.sort: [18, 42] } + + - do: + search: + rest_total_hits_as_int: true + body: + size: 1 + query: + match: + foo: bar + sort: [ { age: desc }, { id: desc } ] + search_after: [18, 42] + pit: + id: "$point_in_time_id" + keep_alive: 1m + + - match: {hits.total: 3} + - length: {hits.hits: 1 } + - match: {hits.hits.0._index: test } + - match: {hits.hits.0._id: "1" } + - match: {hits.hits.0.sort: [18, 1] } + + - do: + search: + rest_total_hits_as_int: true + body: + size: 1 + query: + match: + foo: bar + sort: [{ age: desc }, { id: desc } ] + search_after: [18, 1] + pit: + id: "$point_in_time_id" + keep_alive: 1m + + - match: {hits.total: 3} + - length: {hits.hits: 0 } + + - do: + close_point_in_time: + body: + id: "$point_in_time_id" + +--- +"wildcard": + - skip: + version: " - 7.99.99" + reason: "point in time is introduced in 8.0" + - do: + open_point_in_time: + index: "t*" + keep_alive: 5m + - set: {id: point_in_time_id} + + - do: + search: + rest_total_hits_as_int: true + body: + size: 2 + query: + match: + foo: bar + sort: [{ age: desc }, { id: desc }] + pit: + id: "$point_in_time_id" + keep_alive: 1m + + - match: {hits.total: 4 } + - length: {hits.hits: 2 } + - match: {hits.hits.0._index: test } + - match: {hits.hits.0._id: "172" } + - match: {hits.hits.1._index: test2 } + - match: {hits.hits.1._id: "45" } + + - do: + close_point_in_time: + body: + id: "$point_in_time_id"