Checkstyle shadows vars pt10 (elastic#81361)
Backport of elastic#81361.

Part of elastic#19752. Fix more instances where local variable names were
shadowing field names.
pugnascotia committed Dec 7, 2021
1 parent 8305dbd commit 6df6380
Showing 54 changed files with 275 additions and 238 deletions.
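
For readers without context: "shadowing" (checkstyle's HiddenField) means a local variable or parameter reuses the name of a field, so unqualified references silently bind to the local. A minimal sketch of the problem and the usual fix, with hypothetical names rather than code from this commit:

    class JobTracker {
        private long maxOpenJobs;

        // Before: the parameter hides the field, so every write to the
        // field must be qualified with "this." or it silently self-assigns.
        void setLimit(long maxOpenJobs) {
            this.maxOpenJobs = maxOpenJobs;
        }

        // After: renaming the parameter removes the ambiguity.
        void setLimitRenamed(long newMaxOpenJobs) {
            maxOpenJobs = newMaxOpenJobs;
        }
    }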
9 changes: 8 additions & 1 deletion build-tools-internal/src/main/resources/checkstyle.xml
@@ -97,6 +97,13 @@

<!-- Checks that a local variable or a parameter does not shadow a field that is defined in the same class. -->
<!-- Use a forked version that understands setters that don't start with "set". -->
+ <!-- Notes on `ignoreMethodNames`:
+   * `createParser` creates objects so should be considered a sort-of constructor
+   * `createComponents` by its nature ends up referring to fields
+     a lot, and there's no benefit to flagging shadowed variables in
+     those methods.
+ -->
<!-- Disabled until existing violations are fixed -->
<!--
<module name="org.elasticsearch.gradle.internal.checkstyle.HiddenFieldCheck">
@@ -107,7 +114,7 @@
<property name="minLineCount" value="5" />
<property name="ignoreFormat" value="^(?:threadPool)$"/>
<property name="ignoreAbstractMethods" value="true"/>
<property name="ignoreMethodNames" value="^(?:createParser)$"/>
<property name="ignoreMethodNames" value="^(?:createParser|createComponents)$"/>
<property name="setterCanReturnItsClass" value="true"/>
<message key="hidden.field" value="''{0}'' hides a field." />
</module>
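
To make the forked check concrete: the stock HiddenFieldCheck only recognises methods named like setX as setters, so Elasticsearch's fluent builder-style setters would all be flagged. A hedged sketch of what the fork (with setterCanReturnItsClass=true) is meant to tolerate; the class is hypothetical:

    class RequestBuilder {
        private int timeout;

        // A conventional setter: exempt under the stock check's
        // ignoreSetter handling.
        public void setTimeout(int timeout) {
            this.timeout = timeout;
        }

        // A builder-style setter: it does not start with "set" and it
        // returns its own class. The stock check flags the parameter as
        // hiding the field; the forked check treats this as a setter too.
        public RequestBuilder timeout(int timeout) {
            this.timeout = timeout;
            return this;
        }
    }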
[next file]
@@ -1090,7 +1090,7 @@ public Collection<Module> createGuiceModules() {

@Override
public List<RestHandler> getRestHandlers(
- Settings settings,
+ Settings unused,
RestController restController,
ClusterSettings clusterSettings,
IndexScopedSettings indexScopedSettings,
@@ -1264,7 +1264,7 @@ public List<ActionFilter> getActionFilters() {
}

@Override
- public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
+ public List<ExecutorBuilder<?>> getExecutorBuilders(Settings unused) {
if (false == enabled || transportClientMode) {
return emptyList();
}
@@ -1414,7 +1414,7 @@ public List<NamedXContentRegistry.Entry> getNamedXContent() {
}

@Override
- public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+ public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings unused) {
return Collections.unmodifiableList(
Arrays.asList(
SystemIndexDescriptor.builder()
@@ -1695,7 +1695,7 @@ public void cleanUpFeature(
}

@Override
- public BreakerSettings getCircuitBreaker(Settings settings) {
+ public BreakerSettings getCircuitBreaker(Settings settingsToUse) {
return BreakerSettings.updateFromSettings(
new BreakerSettings(
TRAINED_MODEL_CIRCUIT_BREAKER_NAME,
@@ -1704,7 +1704,7 @@ public BreakerSettings getCircuitBreaker(Settings settings) {
CircuitBreaker.Type.MEMORY,
CircuitBreaker.Durability.TRANSIENT
),
- settings
+ settingsToUse
);
}

[next file]
@@ -148,7 +148,7 @@ public void clusterChanged(ClusterChangedEvent event) {

// The atomic flag shortcircuits the check after no legacy templates have been found to exist.
if (this.isMaster && checkForLegacyMlTemplates.get()) {
- if (deleteOneMlLegacyTemplateIfNecessary(client, event.state()) == false) {
+ if (deleteOneMlLegacyTemplateIfNecessary(event.state()) == false) {
checkForLegacyMlTemplates.set(false);
}
}
@@ -158,7 +158,7 @@ public void clusterChanged(ClusterChangedEvent event) {
* @return <code>true</code> if further calls to this method are worthwhile.
* <code>false</code> if this method never needs to be called again.
*/
- private boolean deleteOneMlLegacyTemplateIfNecessary(Client client, ClusterState state) {
+ private boolean deleteOneMlLegacyTemplateIfNecessary(ClusterState state) {

// Don't delete the legacy templates until the entire cluster is on a version that supports composable templates
if (state.nodes().getMinNodeVersion().before(MlIndexTemplateRegistry.COMPOSABLE_TEMPLATE_SWITCH_VERSION)) {
[next file]
@@ -132,10 +132,10 @@ protected void doExecute(
TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId());

BooleanSupplier isTimedOutSupplier = () -> Instant.now(clock).isAfter(timeoutTime);
- AnomalyDetectionAuditor auditor = new AnomalyDetectionAuditor(client, clusterService);
+ AnomalyDetectionAuditor anomalyDetectionAuditor = new AnomalyDetectionAuditor(client, clusterService);

if (Strings.isNullOrEmpty(request.getJobId()) || Strings.isAllOrWildcard(request.getJobId())) {
- List<MlDataRemover> dataRemovers = createDataRemovers(client, taskId, auditor);
+ List<MlDataRemover> dataRemovers = createDataRemovers(client, taskId, anomalyDetectionAuditor);
threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)
.execute(() -> deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier));
} else {
@@ -144,7 +144,7 @@ protected void doExecute(
List<Job> jobs = jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList());
String[] jobIds = jobs.stream().map(Job::getId).toArray(String[]::new);
request.setExpandedJobIds(jobIds);
- List<MlDataRemover> dataRemovers = createDataRemovers(jobs, taskId, auditor);
+ List<MlDataRemover> dataRemovers = createDataRemovers(jobs, taskId, anomalyDetectionAuditor);
deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier);
});
}, listener::onFailure));
@@ -232,53 +232,57 @@ void deleteExpiredData(
}
}

- private List<MlDataRemover> createDataRemovers(OriginSettingClient client, TaskId parentTaskId, AnomalyDetectionAuditor auditor) {
+ private List<MlDataRemover> createDataRemovers(
+     OriginSettingClient originClient,
+     TaskId parentTaskId,
+     AnomalyDetectionAuditor anomalyDetectionAuditor
+ ) {
return Arrays.asList(
new ExpiredResultsRemover(
- client,
- new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+ originClient,
+ new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
parentTaskId,
- auditor,
+ anomalyDetectionAuditor,
threadPool
),
- new ExpiredForecastsRemover(client, threadPool, parentTaskId),
+ new ExpiredForecastsRemover(originClient, threadPool, parentTaskId),
new ExpiredModelSnapshotsRemover(
- client,
- new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+ originClient,
+ new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
threadPool,
parentTaskId,
jobResultsProvider,
- auditor
+ anomalyDetectionAuditor
),
- new UnusedStateRemover(client, clusterService, parentTaskId),
- new EmptyStateIndexRemover(client, parentTaskId),
- new UnusedStatsRemover(client, parentTaskId),
+ new UnusedStateRemover(originClient, clusterService, parentTaskId),
+ new EmptyStateIndexRemover(originClient, parentTaskId),
+ new UnusedStatsRemover(originClient, parentTaskId),
new ExpiredAnnotationsRemover(
- client,
- new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+ originClient,
+ new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
parentTaskId,
- auditor,
+ anomalyDetectionAuditor,
threadPool
)
);
}

- private List<MlDataRemover> createDataRemovers(List<Job> jobs, TaskId parentTaskId, AnomalyDetectionAuditor auditor) {
+ private List<MlDataRemover> createDataRemovers(List<Job> jobs, TaskId parentTaskId, AnomalyDetectionAuditor anomalyDetectionAuditor) {
return Arrays.asList(
- new ExpiredResultsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, auditor, threadPool),
+ new ExpiredResultsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, anomalyDetectionAuditor, threadPool),
new ExpiredForecastsRemover(client, threadPool, parentTaskId),
new ExpiredModelSnapshotsRemover(
client,
new VolatileCursorIterator<>(jobs),
threadPool,
parentTaskId,
jobResultsProvider,
- auditor
+ anomalyDetectionAuditor
),
new UnusedStateRemover(client, clusterService, parentTaskId),
new EmptyStateIndexRemover(client, parentTaskId),
new UnusedStatsRemover(client, parentTaskId),
- new ExpiredAnnotationsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, auditor, threadPool)
+ new ExpiredAnnotationsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, anomalyDetectionAuditor, threadPool)
);
}

[next file]
@@ -689,7 +689,7 @@ protected AllocatedPersistentTask createTask(
public PersistentTasksCustomMetadata.Assignment getAssignment(
TaskParams params,
Collection<DiscoveryNode> candidateNodes,
- ClusterState clusterState
+ @SuppressWarnings("HiddenField") ClusterState clusterState
) {
boolean isMemoryTrackerRecentlyRefreshed = memoryTracker.isRecentlyRefreshed();
Optional<PersistentTasksCustomMetadata.Assignment> optionalAssignment = getPotentialAssignment(
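
This file shows the commit's other tactic: where renaming would obscure a well-known signature, the offending element is annotated with @SuppressWarnings("HiddenField") instead. A sketch of the pattern, with hypothetical names; it assumes checkstyle's SuppressWarningsHolder and SuppressWarningsFilter are configured, since the annotation is inert otherwise:

    class AssignmentDecider {
        private String clusterState;

        // The parameter deliberately keeps the conventional name from the
        // signature it implements; the check is silenced for this one
        // element rather than renaming it.
        void decide(@SuppressWarnings("HiddenField") String clusterState) {
            System.out.println(clusterState);
        }
    }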
[next file]
@@ -97,15 +97,15 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car
+ "]"
);
}
- TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
- if (bucketCountThresholds.getShardSize() == CategorizeTextAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+ TermsAggregator.BucketCountThresholds thresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
+ if (thresholds.getShardSize() == CategorizeTextAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
// The user has not made a shardSize selection. Use default
// heuristic to avoid any wrong-ranking caused by distributed
// counting
// TODO significant text does a 2x here, should we as well?
- bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+ thresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(thresholds.getRequiredSize()));
}
- bucketCountThresholds.ensureValidity();
+ thresholds.ensureValidity();

return new CategorizeTextAggregator(
name,
@@ -114,7 +114,7 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car
parent,
indexedFieldName,
fieldType,
- bucketCountThresholds,
+ thresholds,
maxUniqueTokens,
maxMatchTokens,
similarityThreshold,
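
The change above is the defensive-copy flavour of the same problem: a field is cloned into a local so it can be adjusted per request, and the local previously reused the field's name. A sketch of the pattern with hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    class SettingsHolder {
        private final List<String> defaults = new ArrayList<>();

        List<String> effective(boolean verbose) {
            // Work on a private copy so the shared field is never mutated;
            // calling it "copy" rather than "defaults" keeps any remaining
            // reads of the field unambiguous.
            List<String> copy = new ArrayList<>(defaults);
            if (verbose) {
                copy.add("verbose");
            }
            return copy;
        }
    }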
[next file]
@@ -55,14 +55,14 @@ public long getDocCount() {
return docCount;
}

- public Bucket reduce(BucketKey key, ReduceContext reduceContext) {
+ public Bucket reduce(BucketKey bucketKey, ReduceContext reduceContext) {
List<InternalAggregations> innerAggs = new ArrayList<>(toReduce.size());
- long docCount = 0;
+ long totalDocCount = 0;
for (Bucket bucket : toReduce) {
innerAggs.add(bucket.aggregations);
- docCount += bucket.docCount;
+ totalDocCount += bucket.docCount;
}
- return new Bucket(key, docCount, InternalAggregations.reduce(innerAggs, reduceContext));
+ return new Bucket(bucketKey, totalDocCount, InternalAggregations.reduce(innerAggs, reduceContext));
}

public DelayedCategorizationBucket add(Bucket bucket) {
@@ -339,7 +339,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
}

@Override
- public InternalCategorizationAggregation create(List<Bucket> buckets) {
+ public InternalCategorizationAggregation create(List<Bucket> bucketList) {
return new InternalCategorizationAggregation(
name,
requiredSize,
@@ -348,7 +348,7 @@ public InternalCategorizationAggregation create(List<Bucket> buckets) {
maxMatchTokens,
similarityThreshold,
super.metadata,
- buckets
+ bucketList
);
}

[next file]
@@ -249,11 +249,11 @@ public InferencePipelineAggregationBuilder rewrite(QueryRewriteContext context)

SetOnce<LocalModel> loadedModel = new SetOnce<>();
BiConsumer<Client, ActionListener<?>> modelLoadAction = (client, listener) -> modelLoadingService.get()
- .getModelForSearch(modelId, listener.delegateFailure((delegate, model) -> {
-     loadedModel.set(model);
+ .getModelForSearch(modelId, listener.delegateFailure((delegate, localModel) -> {
+     loadedModel.set(localModel);

boolean isLicensed = MachineLearningField.ML_API_FEATURE.check(licenseState)
- || licenseState.isAllowedByLicense(model.getLicenseLevel());
+ || licenseState.isAllowedByLicense(localModel.getLicenseLevel());
if (isLicensed) {
delegate.onResponse(null);
} else {
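
A detail worth noting in the lambda change above: Java forbids a lambda parameter from shadowing a local variable of the enclosing method, but it may legally shadow a field, so the check still has something to catch. A small sketch with hypothetical names:

    import java.util.function.Consumer;

    class ModelHolder {
        private String model = "field value";

        Consumer<String> printer() {
            // Inside the lambda, "model" is the parameter, not the field,
            // which is the ambiguity the rename above removes.
            return model -> System.out.println(model);
        }
    }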
[next file]
@@ -248,7 +248,7 @@ public InternalAggregation doReduce(Aggregations aggregations, InternalAggregati
);
}
final MlAggsHelper.DoubleBucketValues bucketsValue = maybeBucketsValue.get();
- double[] fractions = this.fractions == null
+ double[] fractionsArr = this.fractions == null
? DoubleStream.concat(
DoubleStream.of(0.0),
Stream.generate(() -> 1.0 / (bucketsValue.getDocCounts().length - 1))
Expand All @@ -258,6 +258,6 @@ public InternalAggregation doReduce(Aggregations aggregations, InternalAggregati
// We prepend zero to the fractions as we prepend 0 to the doc counts and we want them to be the same length when
// we create the monotonically increasing values for distribution comparison.
: DoubleStream.concat(DoubleStream.of(0.0), Arrays.stream(this.fractions)).toArray();
- return new InternalKSTestAggregation(name(), metadata(), ksTest(fractions, bucketsValue, alternatives, samplingMethod));
+ return new InternalKSTestAggregation(name(), metadata(), ksTest(fractionsArr, bucketsValue, alternatives, samplingMethod));
}
}
[next file]
@@ -559,15 +559,15 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider
if (nodeLoads.size() > 1) {
long totalAssignedJobs = nodeLoads.stream().mapToLong(NodeLoad::getNumAssignedJobs).sum();
// one volatile read
- long maxOpenJobs = this.maxOpenJobs;
- if (totalAssignedJobs > maxOpenJobs) {
+ long maxOpenJobsCopy = this.maxOpenJobs;
+ if (totalAssignedJobs > maxOpenJobsCopy) {
String msg = String.format(
Locale.ROOT,
"not scaling down as the total number of jobs [%d] exceeds the setting [%s (%d)]. "
+ " To allow a scale down [%s] must be increased.",
totalAssignedJobs,
MAX_OPEN_JOBS_PER_NODE.getKey(),
- maxOpenJobs,
+ maxOpenJobsCopy,
MAX_OPEN_JOBS_PER_NODE.getKey()
);
logger.info(
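
The "one volatile read" comment above marks a deliberate idiom: copy a volatile field into a local exactly once, so every later use in the method sees the same snapshot even if another thread updates the field mid-method. The local traditionally borrowed the field's own name, which is precisely the shadowing being renamed away. A sketch with hypothetical names:

    class Throttler {
        private volatile long limit = 100;

        boolean overLimit(long observed) {
            // Single volatile read; the comparison and any subsequent
            // logging would all agree on the value they saw.
            long limitCopy = this.limit;
            return observed > limitCopy;
        }
    }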
[next file]
@@ -211,6 +211,7 @@ void markScrollAsErrored() {
searchHasShardFailure = true;
}

@SuppressWarnings("HiddenField")
protected SearchResponse executeSearchScrollRequest(String scrollId) {
return ClientHelper.executeWithHeaders(
context.headers,
[next file]
@@ -77,12 +77,10 @@ public static void create(
ActionListener<DataExtractorFactory> listener
) {

- // Step 2. Contruct the factory and notify listener
+ // Step 2. Construct the factory and notify listener
ActionListener<FieldCapabilitiesResponse> fieldCapabilitiesHandler = ActionListener.wrap(fieldCapabilitiesResponse -> {
- TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse);
- listener.onResponse(
-     new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry, timingStatsReporter)
- );
+ TimeBasedExtractedFields fields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse);
+ listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, fields, xContentRegistry, timingStatsReporter));
}, e -> {
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof IndexNotFoundException) {
[next file]
@@ -153,7 +153,7 @@ public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, Time
}

private void createStatsIndexAndUpdateMappingsIfNecessary(
- Client client,
+ Client clientToUse,
ClusterState clusterState,
TimeValue masterNodeTimeout,
ActionListener<Boolean> listener
@@ -162,15 +162,21 @@ private void createStatsIndexAndUpdateMappingsIfNecessary(
aBoolean -> ElasticsearchMappings.addDocMappingIfMissing(
MlStatsIndex.writeAlias(),
MlStatsIndex::wrappedMapping,
- client,
+ clientToUse,
clusterState,
masterNodeTimeout,
listener
),
listener::onFailure
);

- MlStatsIndex.createStatsIndexAndAliasIfNecessary(client, clusterState, expressionResolver, masterNodeTimeout, createIndexListener);
+ MlStatsIndex.createStatsIndexAndAliasIfNecessary(
+     clientToUse,
+     clusterState,
+     expressionResolver,
+     masterNodeTimeout,
+     createIndexListener
+ );
}

private void determineProgressAndResume(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config) {
[remaining changed files not shown]
