Remove BigArrays from SearchContext (elastic#65981)
We've been trying to shrink the big, mutable `SearchContext`. I'm doing
my part by removing `BigArrays` from it. Doing that required reworking
how we test `Aggregator`s to not need `SearchContext`. So I did that
too. Mostly. `top_hits` still needs a `SubSearchContext`, which we can
still build, but it is now quite contained.
nik9000 authored Dec 8, 2020
1 parent f0a55bb commit 3e45318
Showing 21 changed files with 214 additions and 263 deletions.
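
The heart of the change: aggregators now reach `BigArrays` through `AggregationContext#bigArrays()`, and `ProductionAggregationContext` applies `withCircuitBreaking()` exactly once, at construction (see the AggregationContext.java hunks below). A minimal usage sketch from an aggregator's point of view — the class and method here are illustrative, but `BigArrays`, `LongArray`, and `Releasables` are the real utility classes:

```java
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.search.aggregations.support.AggregationContext;

class CircuitBreakingSketch {
    /** Sums values through a breaker-aware LongArray. Illustrative only. */
    static long sum(AggregationContext context, long[] values) {
        // This allocation is tracked by the circuit breaker: an oversized
        // request throws CircuitBreakingException instead of OOMing the node.
        LongArray copy = context.bigArrays().newLongArray(values.length, false);
        try {
            for (int i = 0; i < values.length; i++) {
                copy.set(i, values[i]);
            }
            long sum = 0;
            for (long i = 0; i < copy.size(); i++) {
                sum += copy.get(i);
            }
            return sum;
        } finally {
            Releasables.close(copy); // big arrays must always be released
        }
    }
}
```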
DefaultSearchContext.java
@@ -28,11 +28,9 @@
import org.apache.lucene.search.Query;
import org.elasticsearch.action.search.SearchShardTask;
import org.elasticsearch.action.search.SearchType;
- import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
@@ -83,9 +81,7 @@ final class DefaultSearchContext extends SearchContext {
private final SearchShardTarget shardTarget;
private final LongSupplier relativeTimeSupplier;
private SearchType searchType;
- private final BigArrays bigArrays;
private final IndexShard indexShard;
- private final ClusterService clusterService;
private final IndexService indexService;
private final ContextIndexSearcher searcher;
private final DfsSearchResult dfsResult;
@@ -146,8 +142,6 @@ final class DefaultSearchContext extends SearchContext {
DefaultSearchContext(ReaderContext readerContext,
ShardSearchRequest request,
SearchShardTarget shardTarget,
- ClusterService clusterService,
- BigArrays bigArrays,
LongSupplier relativeTimeSupplier,
TimeValue timeout,
FetchPhase fetchPhase,
@@ -157,14 +151,11 @@
this.fetchPhase = fetchPhase;
this.searchType = request.searchType();
this.shardTarget = shardTarget;
- // SearchContexts use a BigArrays that can circuit break
- this.bigArrays = bigArrays.withCircuitBreaking();
this.dfsResult = new DfsSearchResult(readerContext.id(), shardTarget, request);
this.queryResult = new QuerySearchResult(readerContext.id(), shardTarget, request);
this.fetchResult = new FetchSearchResult(readerContext.id(), shardTarget);
this.indexService = readerContext.indexService();
this.indexShard = readerContext.indexShard();
- this.clusterService = clusterService;

Engine.Searcher engineSearcher = readerContext.acquireSearcher("search");
this.searcher = new ContextIndexSearcher(engineSearcher.getIndexReader(), engineSearcher.getSimilarity(),
@@ -457,11 +448,6 @@ public IndexShard indexShard() {
return this.indexShard;
}

- @Override
- public BigArrays bigArrays() {
-     return bigArrays;
- }

@Override
public BitsetFilterCache bitsetFilterCache() {
return indexService.cache().bitsetFilterCache();
SearchService.java
@@ -768,8 +768,8 @@ private DefaultSearchContext createSearchContext(ReaderContext reader, ShardSearchRequest ...
try {
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(),
reader.indexShard().shardId(), request.getClusterAlias(), OriginalIndices.NONE);
- searchContext = new DefaultSearchContext(reader, request, shardTarget, clusterService,
-     bigArrays, threadPool::relativeTimeInMillis, timeout, fetchPhase, lowLevelCancellation);
+ searchContext = new DefaultSearchContext(reader, request, shardTarget,
+     threadPool::relativeTimeInMillis, timeout, fetchPhase, lowLevelCancellation);
// we clone the query shard context here just for rewriting otherwise we
// might end up with incorrect state since we are using now() or script services
// during rewrite and normalized / evaluate templates etc.
@@ -25,7 +25,6 @@
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
- import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -401,11 +400,6 @@ public String getType() {
return NAME;
}

- @Override
- protected AggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
-     return super.doRewrite(queryShardContext);
- }

@Override
protected ValuesSourceRegistry.RegistryKey<?> getRegistryKey() {
return REGISTRY_KEY;
AggregationContext.java
@@ -230,6 +230,12 @@ public final AggregationUsageService getUsageService() {
*/
public abstract Analyzer getIndexAnalyzer(Function<String, NamedAnalyzer> unindexedFieldAnalyzer);

+ /**
+ * Is this request cacheable? Requests that have
+ * non-deterministic queries or scripts aren't cacheable.
+ */
+ public abstract boolean isCacheable();

/**
* Implementation of {@linkplain AggregationContext} for production usage
* that wraps our ubiquitous {@link QueryShardContext} and anything else
@@ -239,6 +245,7 @@ public final AggregationUsageService getUsageService() {
*/
public static class ProductionAggregationContext extends AggregationContext {
private final QueryShardContext context;
+ private final BigArrays bigArrays;
private final Query topLevelQuery;
private final AggregationProfiler profiler;
private final MultiBucketConsumer multiBucketConsumer;
@@ -277,6 +284,7 @@ public ProductionAggregationContext(
Supplier<Boolean> isCancelled
) {
this.context = context;
+ this.bigArrays = context.bigArrays().withCircuitBreaking(); // We can break in searches.
this.topLevelQuery = topLevelQuery;
this.profiler = profiler;
this.multiBucketConsumer = multiBucketConsumer;
@@ -343,7 +351,7 @@ public ValuesSourceRegistry getValuesSourceRegistry() {

@Override
public BigArrays bigArrays() {
- return context.bigArrays();
+ return bigArrays;
}

@Override
@@ -425,5 +433,10 @@ public CircuitBreaker breaker() {
public Analyzer getIndexAnalyzer(Function<String, NamedAnalyzer> unindexedFieldAnalyzer) {
return context.getIndexAnalyzer(unindexedFieldAnalyzer);
}

+ @Override
+ public boolean isCacheable() {
+     return context.isCacheable();
+ }
}
}
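
Two things to note in the hunks above: `bigArrays` is decorated with circuit breaking exactly once, and the new `isCacheable()` simply delegates to `QueryShardContext`. A hypothetical caller sketch for the latter — the cache, key, and method are stand-ins, not the real request-cache plumbing:

```java
import java.util.Map;
import java.util.function.Supplier;
import org.elasticsearch.search.aggregations.support.AggregationContext;

class CacheGateSketch {
    /** Only deterministic requests may read from or populate a cache. */
    static <T> T maybeCached(AggregationContext context, Map<String, T> cache,
                             String key, Supplier<T> compute) {
        if (context.isCacheable() == false) {
            return compute.get(); // non-deterministic query or script: never cache
        }
        return cache.computeIfAbsent(key, k -> compute.get());
    }
}
```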
FilteredSearchContext.java
@@ -25,7 +25,6 @@
import org.elasticsearch.action.search.SearchShardTask;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
@@ -200,11 +199,6 @@ public IndexShard indexShard() {
return in.indexShard();
}

- @Override
- public BigArrays bigArrays() {
-     return in.bigArrays();
- }

@Override
public BitsetFilterCache bitsetFilterCache() {
return in.bitsetFilterCache();
SearchContext.java
@@ -28,7 +28,6 @@
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
@@ -210,8 +209,6 @@ public final void assignRescoreDocIds(RescoreDocIds rescoreDocIds) {

public abstract IndexShard indexShard();

- public abstract BigArrays bigArrays(); // TODO this is only used in aggs land and should be contained

public abstract BitsetFilterCache bitsetFilterCache();

public abstract TimeValue timeout();
SearchSlowLogTests.java
@@ -32,7 +32,6 @@
import org.elasticsearch.common.logging.MockAppender;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -82,10 +81,9 @@ protected SearchContext createSearchContext(IndexService indexService) {
}

protected SearchContext createSearchContext(IndexService indexService, String... groupStats) {
- BigArrays bigArrays = indexService.getBigArrays();
final ShardSearchRequest request =
new ShardSearchRequest(new ShardId(indexService.index(), 0), 0L, null);
- return new TestSearchContext(bigArrays, indexService) {
+ return new TestSearchContext(indexService) {
@Override
public List<String> groupStats() {
return Arrays.asList(groupStats);
SearchOperationListenerTests.java
@@ -18,6 +18,7 @@
*/
package org.elasticsearch.index.shard;

+ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.internal.ReaderContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
@@ -139,7 +140,7 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest
Collections.shuffle(indexingOperationListeners, random());
SearchOperationListener.CompositeListener compositeListener =
new SearchOperationListener.CompositeListener(indexingOperationListeners, logger);
- SearchContext ctx = new TestSearchContext(null);
+ SearchContext ctx = new TestSearchContext((QueryShardContext) null);
compositeListener.onQueryPhase(ctx, timeInNanos.get());
assertEquals(0, preFetch.get());
assertEquals(0, preQuery.get());
DefaultSearchContextTests.java
@@ -34,9 +34,6 @@
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
- import org.elasticsearch.common.util.BigArrays;
- import org.elasticsearch.common.util.MockBigArrays;
- import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.IndexCache;
@@ -49,7 +46,6 @@
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
- import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.LegacyReaderContext;
import org.elasticsearch.search.internal.ReaderContext;
@@ -122,8 +118,6 @@ public void testPreProcess() throws Exception {
when(indexService.getIndexSettings()).thenReturn(indexSettings);
when(mapperService.getIndexSettings()).thenReturn(indexSettings);

- BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());

try (Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {

@@ -150,7 +144,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
ReaderContext readerWithoutScroll = new ReaderContext(
newContextId(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false);
DefaultSearchContext contextWithoutScroll = new DefaultSearchContext(readerWithoutScroll, shardSearchRequest, target, null,
- bigArrays, null, timeout, null, false);
+ timeout, null, false);
contextWithoutScroll.from(300);
contextWithoutScroll.close();

@@ -166,7 +160,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
ReaderContext readerContext = new LegacyReaderContext(
newContextId(), indexService, indexShard, searcherSupplier.get(), shardSearchRequest, randomNonNegativeLong());
DefaultSearchContext context1 = new DefaultSearchContext(readerContext, shardSearchRequest, target, null,
- bigArrays, null, timeout, null, false);
+ timeout, null, false);
context1.from(300);
exception = expectThrows(IllegalArgumentException.class, () -> context1.preProcess(false));
assertThat(exception.getMessage(), equalTo("Batch size is too large, size must be less than or equal to: ["
@@ -200,7 +194,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
newContextId(), indexService, indexShard, searcherSupplier.get(), randomNonNegativeLong(), false);
// rescore is null but sliceBuilder is not null
DefaultSearchContext context2 = new DefaultSearchContext(readerContext, shardSearchRequest, target,
- null, bigArrays, null, timeout, null, false);
+ null, timeout, null, false);

SliceBuilder sliceBuilder = mock(SliceBuilder.class);
int numSlices = maxSlicesPerScroll + randomIntBetween(1, 100);
@@ -217,7 +211,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
when(shardSearchRequest.indexBoost()).thenReturn(AbstractQueryBuilder.DEFAULT_BOOST);

DefaultSearchContext context3 = new DefaultSearchContext(readerContext, shardSearchRequest, target, null,
- bigArrays, null, timeout, null, false);
+ timeout, null, false);
ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery();
context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false);
assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query()));
Expand All @@ -229,7 +223,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
readerContext = new ReaderContext(newContextId(), indexService, indexShard,
searcherSupplier.get(), randomNonNegativeLong(), false);
DefaultSearchContext context4 =
- new DefaultSearchContext(readerContext, shardSearchRequest, target, null, bigArrays, null, timeout, null, false);
+ new DefaultSearchContext(readerContext, shardSearchRequest, target, null, timeout, null, false);
context4.sliceBuilder(new SliceBuilder(1,2)).parsedQuery(parsedQuery).preProcess(false);
Query query1 = context4.query();
context4.sliceBuilder(new SliceBuilder(0,2)).parsedQuery(parsedQuery).preProcess(false);
Expand All @@ -256,8 +250,6 @@ public void testClearQueryCancellationsOnClose() throws IOException {

IndexService indexService = mock(IndexService.class);

- BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());

try (Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir)) {

@@ -282,7 +274,7 @@ protected Engine.Searcher acquireSearcherInternal(String source) {
ReaderContext readerContext = new ReaderContext(
newContextId(), indexService, indexShard, searcherSupplier, randomNonNegativeLong(), false);
DefaultSearchContext context = new DefaultSearchContext(readerContext, shardSearchRequest, target, null,
- bigArrays, null, timeout, null, false);
+ timeout, null, false);

assertThat(context.searcher().hasCancellations(), is(false));
context.searcher().addQueryCancellation(() -> {});
BucketsAggregatorTests.java
@@ -32,14 +32,10 @@
import org.elasticsearch.search.aggregations.AggregatorTestCase;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
- import org.elasticsearch.search.aggregations.MultiBucketConsumerService.MultiBucketConsumer;
- import org.elasticsearch.search.aggregations.support.AggregationContext.ProductionAggregationContext;
- import org.elasticsearch.search.internal.SearchContext;
+ import org.elasticsearch.search.aggregations.support.AggregationContext;

import java.io.IOException;

- import static org.mockito.Mockito.mock;

public class BucketsAggregatorTests extends AggregatorTestCase{

public BucketsAggregator buildMergeAggregator() throws IOException{
@@ -53,12 +49,11 @@ public BucketsAggregator buildMergeAggregator() throws IOException{
try (IndexReader indexReader = DirectoryReader.open(directory)) {
IndexSearcher indexSearcher = new IndexSearcher(indexReader);

- SearchContext searchContext = createSearchContext(
+ AggregationContext context = createAggregationContext(
indexSearcher,
null,
new NumberFieldMapper.NumberFieldType("test", NumberFieldMapper.NumberType.INTEGER)
);
- ProductionAggregationContext context = new ProductionAggregationContext(searchContext, mock(MultiBucketConsumer.class));

return new BucketsAggregator("test", AggregatorFactories.EMPTY, context, null, null, null) {
@Override
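
That is the test-side pattern in a nutshell: `AggregatorTestCase` subclasses now build an `AggregationContext` directly instead of creating a `SearchContext` and hand-wrapping it in a `ProductionAggregationContext` with a mocked `MultiBucketConsumer`. Condensed, inside a test method — the field type and query are illustrative; `createAggregationContext` and the two-argument `createAggregator` are the helpers used in the hunks above and below:

```java
AggregationContext context = createAggregationContext(
    indexSearcher,                  // searcher over the test reader
    new MatchAllDocsQuery(),        // top-level query, may be null
    new NumberFieldMapper.NumberFieldType("number", NumberFieldMapper.NumberType.INTEGER));
Aggregator aggregator = createAggregator(aggregationBuilder, context);
```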
@@ -22,6 +22,7 @@
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
+ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryBuilder;
@@ -62,6 +63,7 @@ public void testFilterSizeLimitation() throws Exception {
IndexSettings indexSettings = new IndexSettings(indexMetadata, Settings.EMPTY);
when(indexShard.indexSettings()).thenReturn(indexSettings);
when(queryShardContext.getIndexSettings()).thenReturn(indexSettings);
+ when(queryShardContext.bigArrays()).thenReturn(BigArrays.NON_RECYCLING_INSTANCE);
when(indexShard.shardId()).thenReturn(new ShardId(new Index("test", "test"), 1));
SearchContext context = new TestSearchContext(queryShardContext, indexShard);

DateHistogramAggregatorTests.java
@@ -38,8 +38,8 @@
import org.elasticsearch.search.aggregations.BucketOrder;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
+ import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper;
- import org.elasticsearch.search.internal.SearchContext;
import org.hamcrest.Matcher;

import java.io.IOException;
@@ -1210,7 +1210,7 @@ private void aggregationImplementationChoiceTestCase(
);
}
try (IndexReader reader = indexWriter.getReader()) {
- SearchContext context = createSearchContext(new IndexSearcher(reader), new MatchAllDocsQuery(), ft);
+ AggregationContext context = createAggregationContext(new IndexSearcher(reader), new MatchAllDocsQuery(), ft);
Aggregator agg = createAggregator(builder, context);
Matcher<Aggregator> matcher = instanceOf(DateHistogramAggregator.FromDateRange.class);
if (usesFromRange == false) {