Skip to content

Commit

Permalink
remove unused variable
Browse files Browse the repository at this point in the history
  • Loading branch information
howardhuanghua committed Oct 28, 2019
1 parent 0df4cf4 commit 998ff15
Show file tree
Hide file tree
Showing 4 changed files with 11 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
Expand Down Expand Up @@ -205,7 +206,8 @@ public SearchService(ClusterService clusterService, IndicesService indicesServic
this.bigArrays = bigArrays;
this.queryPhase = new QueryPhase();
this.fetchPhase = fetchPhase;
this.multiBucketConsumerService = new MultiBucketConsumerService(clusterService, settings, circuitBreakerService);
this.multiBucketConsumerService = new MultiBucketConsumerService(clusterService, settings,
circuitBreakerService.getBreaker(CircuitBreaker.REQUEST));

TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;

Expand All @@ -40,20 +39,15 @@
*/
public class MultiBucketConsumerService {
public static final int DEFAULT_MAX_BUCKETS = 10000;
public static final int DEFAULT_CHECK_BUCKETS_STEP_SIZE = 1000;
public static final Setting<Integer> MAX_BUCKET_SETTING =
Setting.intSetting("search.max_buckets", DEFAULT_MAX_BUCKETS, 0, Setting.Property.NodeScope, Setting.Property.Dynamic);

public static final Setting<Integer> CHECK_BUCKETS_STEP_SIZE_SETTING =
Setting.intSetting("search.check_buckets_step_size", DEFAULT_CHECK_BUCKETS_STEP_SIZE,
-1, Setting.Property.NodeScope, Setting.Property.Dynamic);

private final CircuitBreakerService circuitBreakerService;
private final CircuitBreaker breaker;

private volatile int maxBucket;

public MultiBucketConsumerService(ClusterService clusterService, Settings settings, CircuitBreakerService circuitBreakerService) {
this.circuitBreakerService = circuitBreakerService;
public MultiBucketConsumerService(ClusterService clusterService, Settings settings, CircuitBreaker breaker) {
this.breaker = breaker;
this.maxBucket = MAX_BUCKET_SETTING.get(settings);
clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_BUCKET_SETTING, this::setMaxBucket);
}
Expand Down Expand Up @@ -123,6 +117,7 @@ public void accept(int value) {
MAX_BUCKET_SETTING.getKey() + "] cluster level setting.", limit);
}

// check parent circuit breaker every 1024 buckets
if (value > 0 && (count & 0x3FF) == 0) {
breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets");
}
Expand All @@ -142,6 +137,6 @@ public int getLimit() {
}

public MultiBucketConsumer create() {
return new MultiBucketConsumer(maxBucket, circuitBreakerService.getBreaker(CircuitBreaker.REQUEST));
return new MultiBucketConsumer(maxBucket, breaker);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,6 @@
import java.util.stream.Collectors;

import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS;
import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_CHECK_BUCKETS_STEP_SIZE;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doAnswer;
Expand Down Expand Up @@ -197,7 +196,7 @@ public boolean shouldCache(Query query) {
when(searchContext.numberOfShards()).thenReturn(1);
when(searchContext.searcher()).thenReturn(contextIndexSearcher);
when(searchContext.fetchPhase())
.thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase())));
.thenReturn(new FetchPhase(Arrays.asList(new FetchSourceSubPhase(), new DocValueFieldsFetchSubPhase())));
when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class)));
CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
IndexShard indexShard = mock(IndexShard.class);
Expand Down Expand Up @@ -355,13 +354,13 @@ protected <A extends InternalAggregation, C extends Aggregator> A searchAndReduc
final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
final int size = compCTX.leaves().size();
subSearchers = new ShardSearcher[size];
for (int searcherIDX = 0; searcherIDX < subSearchers.length; searcherIDX++) {
for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
}
}

List<InternalAggregation> aggs = new ArrayList<>();
List<InternalAggregation> aggs = new ArrayList<> ();
Query rewritten = searcher.rewrite(query);
Weight weight = searcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f);
MultiBucketConsumer bucketConsumer = new MultiBucketConsumer(maxBucket,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,6 @@

public abstract class InternalAggregationTestCase<T extends InternalAggregation> extends AbstractWireSerializingTestCase<T> {
public static final int DEFAULT_MAX_BUCKETS = 100000;
public static final int DEFAULT_CHECK_BUCKETS_STEP_SIZE = 100000;
protected static final double TOLERANCE = 1e-10;

private static final Comparator<InternalAggregation> INTERNAL_AGG_COMPARATOR = (agg1, agg2) -> {
Expand Down

0 comments on commit 998ff15

Please sign in to comment.