Checkstyle shadows vars pt10 #81361

Merged on Dec 7, 2021 (25 commits).

Changes from all commits:
aad38ec  Fix shadowed vars - add ignoreMethodNamesRegex to HiddenFieldCheck (pugnascotia, Nov 23, 2021)
4ae30f3  Fix shadowed vars - add ignoreConstructorBody to HiddenFieldCheck (pugnascotia, Nov 24, 2021)
201d517  Fix shadowed vars (pugnascotia, Nov 24, 2021)
652380e  Merge remote-tracking branch 'upstream/master' into checkstyle-shadow… (pugnascotia, Nov 24, 2021)
5ef56b0  Fix shadowed vars (pugnascotia, Nov 24, 2021)
3c76892  Fix more shadowed vars + HiddenField improvements (pugnascotia, Nov 24, 2021)
a6bef71  Bunch of fixes (pugnascotia, Nov 24, 2021)
04e4fa1  Fix shadowed vars pt9 (pugnascotia, Nov 24, 2021)
f2c8398  Fix shadowed vars pt9 (pugnascotia, Nov 24, 2021)
067802b  Fix shadowed vars pt9 (pugnascotia, Nov 25, 2021)
f40d206  Fix shadowed vars pt9 (pugnascotia, Nov 25, 2021)
e3d4483  Fix shadowed vars pt9 (pugnascotia, Nov 25, 2021)
0f80a18  Fix shadowed vars pt9 (pugnascotia, Nov 25, 2021)
91e9d10  Fix shadowed vars pt10 (pugnascotia, Nov 29, 2021)
f8c3253  Fix shadowed vars pt10 (pugnascotia, Nov 29, 2021)
20c9659  Fix shadowed vars pt10 (pugnascotia, Nov 29, 2021)
a0c64a6  Fix shadowed vars pt10 (pugnascotia, Nov 29, 2021)
16f81da  Merge remote-tracking branch 'upstream/master' into checkstyle-shadow… (pugnascotia, Dec 2, 2021)
c2bfd21  Merge branch 'checkstyle-shadows-vars-pt9' into checkstyle-shadows-va… (pugnascotia, Dec 3, 2021)
4954650  Commit checkstyle changes (pugnascotia, Dec 3, 2021)
25c27f0  Merge remote-tracking branch 'upstream/master' into checkstyle-shadow… (pugnascotia, Dec 3, 2021)
7bfd6eb  Merge remote-tracking branch 'upstream/master' into checkstyle-shadow… (pugnascotia, Dec 6, 2021)
b641d6d  Tweaks (pugnascotia, Dec 6, 2021)
43d4d2f  Checkstyle shadows vars pt10 (pugnascotia, Dec 6, 2021)
ff37eef  Formatting (pugnascotia, Dec 6, 2021)
build-tools-internal/src/main/resources/checkstyle.xml (9 changes: 8 additions & 1 deletion)

@@ -107,6 +107,13 @@
 
     <!-- Checks that a local variable or a parameter does not shadow a field that is defined in the same class. -->
     <!-- Use a forked version that understands setters that don't start with "set". -->
+    <!-- Notes on `ignoreMethodNames`:
+
+         * `createParser` creates objects, so it should be considered a sort-of constructor.
+         * `createComponents` by its nature ends up referring to fields
+           a lot, and there's no benefit to flagging shadowed variables in
+           those methods.
+    -->
     <!-- Disabled until existing violations are fixed -->
     <!--
     <module name="org.elasticsearch.gradle.internal.checkstyle.HiddenFieldCheck">
@@ -117,7 +124,7 @@
         <property name="minLineCount" value="5" />
         <property name="ignoreFormat" value="^(?:threadPool)$"/>
         <property name="ignoreAbstractMethods" value="true"/>
-        <property name="ignoreMethodNames" value="^(?:createParser)$"/>
+        <property name="ignoreMethodNames" value="^(?:createParser|createComponents)$"/>
         <property name="setterCanReturnItsClass" value="true"/>
         <message key="hidden.field" value="''{0}'' hides a field." />
     </module>
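For context, HiddenFieldCheck flags any local variable or parameter whose name shadows a field of the enclosing class. A minimal, hypothetical illustration of a violation and of the rename-style fix applied throughout this PR (the names below are invented, not taken from the codebase):

```java
// Hypothetical illustration; names are invented, not from the Elasticsearch codebase.
class JobConfig {
    private String jobId;

    // HiddenFieldCheck violation: the parameter `jobId` hides the field `jobId`,
    // so any unqualified read of `jobId` silently refers to the parameter.
    void applyDefaults(String jobId) {
        this.jobId = jobId;
    }

    // The rename-style fix used throughout this PR: nothing is hidden.
    void applyDefaultsFixed(String defaultJobId) {
        this.jobId = defaultJobId;
    }
}
```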
@@ -1114,7 +1114,7 @@ public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(
 
     @Override
     public List<RestHandler> getRestHandlers(
-        Settings settings,
+        Settings unused,
         RestController restController,
         ClusterSettings clusterSettings,
         IndexScopedSettings indexScopedSettings,
@@ -1306,7 +1306,7 @@ public List<ActionFilter> getActionFilters() {
     }
 
     @Override
-    public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) {
+    public List<ExecutorBuilder<?>> getExecutorBuilders(Settings unused) {
         if (false == enabled) {
             return emptyList();
         }
@@ -1496,7 +1496,7 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
     }
 
     @Override
-    public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+    public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings unused) {
         return List.of(
             SystemIndexDescriptor.builder()
                 .setIndexPattern(MlMetaIndex.indexName() + "*")
@@ -1799,7 +1799,7 @@ public void cleanUpFeature(
     }
 
     @Override
-    public BreakerSettings getCircuitBreaker(Settings settings) {
+    public BreakerSettings getCircuitBreaker(Settings settingsToUse) {
         return BreakerSettings.updateFromSettings(
             new BreakerSettings(
                 TRAINED_MODEL_CIRCUIT_BREAKER_NAME,
@@ -1808,7 +1808,7 @@ public BreakerSettings getCircuitBreaker(Settings settings) {
                 CircuitBreaker.Type.MEMORY,
                 CircuitBreaker.Durability.TRANSIENT
             ),
-            settings
+            settingsToUse
         );
     }
@@ -179,7 +179,7 @@ public void clusterChanged(ClusterChangedEvent event) {
 
         // The atomic flag shortcircuits the check after no legacy templates have been found to exist.
         if (this.isMaster && checkForLegacyMlTemplates.get()) {
-            if (deleteOneMlLegacyTemplateIfNecessary(client, event.state()) == false) {
+            if (deleteOneMlLegacyTemplateIfNecessary(event.state()) == false) {
                 checkForLegacyMlTemplates.set(false);
             }
         }
@@ -189,7 +189,7 @@ public void clusterChanged(ClusterChangedEvent event) {
      * @return <code>true</code> if further calls to this method are worthwhile.
      *         <code>false</code> if this method never needs to be called again.
      */
-    private boolean deleteOneMlLegacyTemplateIfNecessary(Client client, ClusterState state) {
+    private boolean deleteOneMlLegacyTemplateIfNecessary(ClusterState state) {
 
         String templateToDelete = nextTemplateToDelete(state.getMetadata().getTemplates());
         if (templateToDelete != null) {
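The hunk above shows the other recurring fix in this PR: when the enclosing class already holds the value as a field, the shadowing parameter is removed outright and the field is used directly. A hypothetical, self-contained sketch of that before/after:

```java
// Hypothetical sketch; the real method is deleteOneMlLegacyTemplateIfNecessary.
interface Client {}
interface ClusterState {}

class TemplateCleaner {
    private final Client client;

    TemplateCleaner(Client client) {
        this.client = client;
    }

    // Before: deleteTemplateIfNecessary(Client client, ClusterState state).
    // The Client parameter hid the field, and the only caller passed the field anyway.
    // After: the parameter is gone and the field is used directly.
    boolean deleteTemplateIfNecessary(ClusterState state) {
        // ... issue the delete via this.client, based on state ...
        return false; // false: nothing left to delete, stop polling
    }
}
```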
@@ -132,10 +132,10 @@ protected void doExecute(
         TaskId taskId = new TaskId(clusterService.localNode().getId(), task.getId());
 
         BooleanSupplier isTimedOutSupplier = () -> Instant.now(clock).isAfter(timeoutTime);
-        AnomalyDetectionAuditor auditor = new AnomalyDetectionAuditor(client, clusterService);
+        AnomalyDetectionAuditor anomalyDetectionAuditor = new AnomalyDetectionAuditor(client, clusterService);
 
         if (Strings.isNullOrEmpty(request.getJobId()) || Strings.isAllOrWildcard(request.getJobId())) {
-            List<MlDataRemover> dataRemovers = createDataRemovers(client, taskId, auditor);
+            List<MlDataRemover> dataRemovers = createDataRemovers(client, taskId, anomalyDetectionAuditor);
             threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)
                 .execute(() -> deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier));
         } else {
@@ -144,7 +144,7 @@ protected void doExecute(
                 List<Job> jobs = jobBuilders.stream().map(Job.Builder::build).collect(Collectors.toList());
                 String[] jobIds = jobs.stream().map(Job::getId).toArray(String[]::new);
                 request.setExpandedJobIds(jobIds);
-                List<MlDataRemover> dataRemovers = createDataRemovers(jobs, taskId, auditor);
+                List<MlDataRemover> dataRemovers = createDataRemovers(jobs, taskId, anomalyDetectionAuditor);
                 deleteExpiredData(request, dataRemovers, listener, isTimedOutSupplier);
             });
         }, listener::onFailure));
@@ -232,53 +232,57 @@ void deleteExpiredData(
         }
     }
 
-    private List<MlDataRemover> createDataRemovers(OriginSettingClient client, TaskId parentTaskId, AnomalyDetectionAuditor auditor) {
+    private List<MlDataRemover> createDataRemovers(
+        OriginSettingClient originClient,
+        TaskId parentTaskId,
+        AnomalyDetectionAuditor anomalyDetectionAuditor
+    ) {
         return Arrays.asList(
             new ExpiredResultsRemover(
-                client,
-                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+                originClient,
+                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
                 parentTaskId,
-                auditor,
+                anomalyDetectionAuditor,
                 threadPool
             ),
-            new ExpiredForecastsRemover(client, threadPool, parentTaskId),
+            new ExpiredForecastsRemover(originClient, threadPool, parentTaskId),
             new ExpiredModelSnapshotsRemover(
-                client,
-                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+                originClient,
+                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
                 threadPool,
                 parentTaskId,
                 jobResultsProvider,
-                auditor
+                anomalyDetectionAuditor
             ),
-            new UnusedStateRemover(client, parentTaskId),
-            new EmptyStateIndexRemover(client, parentTaskId),
-            new UnusedStatsRemover(client, parentTaskId),
+            new UnusedStateRemover(originClient, parentTaskId),
+            new EmptyStateIndexRemover(originClient, parentTaskId),
+            new UnusedStatsRemover(originClient, parentTaskId),
             new ExpiredAnnotationsRemover(
-                client,
-                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(client)),
+                originClient,
+                new WrappedBatchedJobsIterator(new SearchAfterJobsIterator(originClient)),
                 parentTaskId,
-                auditor,
+                anomalyDetectionAuditor,
                 threadPool
             )
         );
     }
 
-    private List<MlDataRemover> createDataRemovers(List<Job> jobs, TaskId parentTaskId, AnomalyDetectionAuditor auditor) {
+    private List<MlDataRemover> createDataRemovers(List<Job> jobs, TaskId parentTaskId, AnomalyDetectionAuditor anomalyDetectionAuditor) {
         return Arrays.asList(
-            new ExpiredResultsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, auditor, threadPool),
+            new ExpiredResultsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, anomalyDetectionAuditor, threadPool),
             new ExpiredForecastsRemover(client, threadPool, parentTaskId),
             new ExpiredModelSnapshotsRemover(
                 client,
                 new VolatileCursorIterator<>(jobs),
                 threadPool,
                 parentTaskId,
                 jobResultsProvider,
-                auditor
+                anomalyDetectionAuditor
             ),
             new UnusedStateRemover(client, parentTaskId),
             new EmptyStateIndexRemover(client, parentTaskId),
             new UnusedStatsRemover(client, parentTaskId),
-            new ExpiredAnnotationsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, auditor, threadPool)
+            new ExpiredAnnotationsRemover(client, new VolatileCursorIterator<>(jobs), parentTaskId, anomalyDetectionAuditor, threadPool)
         );
     }
@@ -699,7 +699,7 @@ protected AllocatedPersistentTask createTask(
     public PersistentTasksCustomMetadata.Assignment getAssignment(
         TaskParams params,
         Collection<DiscoveryNode> candidateNodes,
-        ClusterState clusterState
+        @SuppressWarnings("HiddenField") ClusterState clusterState
     ) {
         boolean isMemoryTrackerRecentlyRefreshed = memoryTracker.isRecentlyRefreshed();
         Optional<PersistentTasksCustomMetadata.Assignment> optionalAssignment = getPotentialAssignment(
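Where a parameter name is effectively fixed by an overridden API and renaming it would hurt readability, the PR suppresses the check instead of renaming. A hypothetical sketch of the two suppression styles used in this PR, on a single parameter as above and on a whole method as done later for executeSearchScrollRequest; this assumes Checkstyle's SuppressWarningsHolder/SuppressWarningsFilter modules are enabled in the build, which the diff implies:

```java
// Hypothetical sketch of the suppression pattern; not code from this PR.
class ScrollingSearcher {
    private String scrollId;

    // Style 1: suppress on the one parameter whose name is fixed by the API.
    void rememberScroll(@SuppressWarnings("HiddenField") String scrollId) {
        this.scrollId = scrollId;
    }

    // Style 2: suppress on the whole method when the shadowing is intentional.
    @SuppressWarnings("HiddenField")
    String continueScroll(String scrollId) {
        return "results-for-" + scrollId;
    }
}
```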
@@ -97,15 +97,15 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car
                     + "]"
             );
         }
-        TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
-        if (bucketCountThresholds.getShardSize() == CategorizeTextAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
+        TermsAggregator.BucketCountThresholds thresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds);
+        if (thresholds.getShardSize() == CategorizeTextAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) {
             // The user has not made a shardSize selection. Use default
             // heuristic to avoid any wrong-ranking caused by distributed
             // counting
             // TODO significant text does a 2x here, should we as well?
-            bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize()));
+            thresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(thresholds.getRequiredSize()));
         }
-        bucketCountThresholds.ensureValidity();
+        thresholds.ensureValidity();
 
         return new CategorizeTextAggregator(
             name,
@@ -114,7 +114,7 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car
             parent,
             indexedFieldName,
             fieldType,
-            bucketCountThresholds,
+            thresholds,
             maxUniqueTokens,
             maxMatchTokens,
             similarityThreshold,
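This hunk is the copy-then-mutate pattern: the method clones a field so per-request tweaks don't touch shared state, and the clone previously reused the field's own name, which is exactly what HiddenFieldCheck flags. A hypothetical, self-contained distillation (the real type is TermsAggregator.BucketCountThresholds):

```java
// Hypothetical, self-contained sketch; not code from this PR.
class BucketCountThresholds {
    private int shardSize;

    BucketCountThresholds(int shardSize) { this.shardSize = shardSize; }
    BucketCountThresholds(BucketCountThresholds other) { this(other.shardSize); }

    int getShardSize() { return shardSize; }
    void setShardSize(int shardSize) { this.shardSize = shardSize; } // setters are exempt from the check
}

class CategorizeFactory {
    private final BucketCountThresholds bucketCountThresholds = new BucketCountThresholds(-1);

    BucketCountThresholds resolveThresholds() {
        // Copy the field before mutating it, and give the copy its own name
        // (`thresholds`) so the local no longer hides the field it came from.
        BucketCountThresholds thresholds = new BucketCountThresholds(bucketCountThresholds);
        if (thresholds.getShardSize() < 0) {
            thresholds.setShardSize(10); // illustrative default only
        }
        return thresholds;
    }
}
```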
@@ -53,14 +53,14 @@ public long getDocCount() {
         return docCount;
     }
 
-    public Bucket reduce(BucketKey key, ReduceContext reduceContext) {
+    public Bucket reduce(BucketKey bucketKey, ReduceContext reduceContext) {
         List<InternalAggregations> innerAggs = new ArrayList<>(toReduce.size());
-        long docCount = 0;
+        long totalDocCount = 0;
         for (Bucket bucket : toReduce) {
             innerAggs.add(bucket.aggregations);
-            docCount += bucket.docCount;
+            totalDocCount += bucket.docCount;
         }
-        return new Bucket(key, docCount, InternalAggregations.reduce(innerAggs, reduceContext));
+        return new Bucket(bucketKey, totalDocCount, InternalAggregations.reduce(innerAggs, reduceContext));
     }
 
     public DelayedCategorizationBucket add(Bucket bucket) {
@@ -316,7 +316,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th
     }
 
     @Override
-    public InternalCategorizationAggregation create(List<Bucket> buckets) {
+    public InternalCategorizationAggregation create(List<Bucket> bucketList) {
         return new InternalCategorizationAggregation(
             name,
             requiredSize,
@@ -325,7 +325,7 @@ public InternalCategorizationAggregation create(List<Bucket> buckets) {
             maxMatchTokens,
             similarityThreshold,
             super.metadata,
-            buckets
+            bucketList
         );
     }
@@ -264,10 +264,10 @@ public InferencePipelineAggregationBuilder rewrite(QueryRewriteContext context)
 
         SetOnce<LocalModel> loadedModel = new SetOnce<>();
         BiConsumer<Client, ActionListener<?>> modelLoadAction = (client, listener) -> modelLoadingService.get()
-            .getModelForSearch(modelId, listener.delegateFailure((delegate, model) -> {
-                loadedModel.set(model);
+            .getModelForSearch(modelId, listener.delegateFailure((delegate, localModel) -> {
+                loadedModel.set(localModel);
 
-                boolean isLicensed = model.getLicenseLevel() == License.OperationMode.BASIC
+                boolean isLicensed = localModel.getLicenseLevel() == License.OperationMode.BASIC
                     || MachineLearningField.ML_API_FEATURE.check(licenseState);
                 if (isLicensed) {
                     delegate.onResponse(null);
@@ -248,7 +248,7 @@ public InternalAggregation doReduce(Aggregations aggregations, InternalAggregati
             );
         }
         final MlAggsHelper.DoubleBucketValues bucketsValue = maybeBucketsValue.get();
-        double[] fractions = this.fractions == null
+        double[] fractionsArr = this.fractions == null
             ? DoubleStream.concat(
                 DoubleStream.of(0.0),
                 Stream.generate(() -> 1.0 / (bucketsValue.getDocCounts().length - 1))
@@ -258,6 +258,6 @@ public InternalAggregation doReduce(Aggregations aggregations, InternalAggregati
             // We prepend zero to the fractions as we prepend 0 to the doc counts and we want them to be the same length when
             // we create the monotonically increasing values for distribution comparison.
             : DoubleStream.concat(DoubleStream.of(0.0), Arrays.stream(this.fractions)).toArray();
-        return new InternalKSTestAggregation(name(), metadata(), ksTest(fractions, bucketsValue, alternatives, samplingMethod));
+        return new InternalKSTestAggregation(name(), metadata(), ksTest(fractionsArr, bucketsValue, alternatives, samplingMethod));
     }
 }
@@ -604,15 +604,15 @@ public AutoscalingDeciderResult scale(Settings configuration, AutoscalingDecider
         if (nodeLoads.size() > 1) {
             long totalAssignedJobs = nodeLoads.stream().mapToLong(NodeLoad::getNumAssignedJobs).sum();
             // one volatile read
-            long maxOpenJobs = this.maxOpenJobs;
-            if (totalAssignedJobs > maxOpenJobs) {
+            long maxOpenJobsCopy = this.maxOpenJobs;
+            if (totalAssignedJobs > maxOpenJobsCopy) {
                 String msg = String.format(
                     Locale.ROOT,
                     "not scaling down as the total number of jobs [%d] exceeds the setting [%s (%d)]. "
                         + " To allow a scale down [%s] must be increased.",
                     totalAssignedJobs,
                     MAX_OPEN_JOBS_PER_NODE.getKey(),
-                    maxOpenJobs,
+                    maxOpenJobsCopy,
                     MAX_OPEN_JOBS_PER_NODE.getKey()
                 );
                 logger.info(
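The `maxOpenJobsCopy` rename preserves the "one volatile read" idiom the original comment points at: the volatile field is read once into a local so the comparison and the log message cannot observe two different values, and the local now has a name that no longer hides the field. A hypothetical distilled sketch:

```java
// Hypothetical sketch of the single-volatile-read idiom; not code from this PR.
class ScaleDecider {
    private volatile long maxOpenJobs = 512; // updated concurrently by a settings listener

    void checkScaleDown(long totalAssignedJobs) {
        // Read the volatile field exactly once; a second read could see a
        // different value if another thread updates the setting in between.
        long maxOpenJobsCopy = this.maxOpenJobs;
        if (totalAssignedJobs > maxOpenJobsCopy) {
            System.out.printf("not scaling down: jobs [%d] exceed limit [%d]%n",
                totalAssignedJobs, maxOpenJobsCopy);
        }
    }
}
```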
@@ -211,6 +211,7 @@ void markScrollAsErrored() {
         searchHasShardFailure = true;
     }
 
+    @SuppressWarnings("HiddenField")
     protected SearchResponse executeSearchScrollRequest(String scrollId) {
         return ClientHelper.executeWithHeaders(
             context.headers,
@@ -77,7 +77,7 @@ public static void create(
         ActionListener<DataExtractorFactory> listener
     ) {
 
-        // Step 2. Contruct the factory and notify listener
+        // Step 2. Construct the factory and notify listener
         ActionListener<FieldCapabilitiesResponse> fieldCapabilitiesHandler = ActionListener.wrap(fieldCapabilitiesResponse -> {
             if (fieldCapabilitiesResponse.getIndices().length == 0) {
                 listener.onFailure(
@@ -89,10 +89,8 @@ public static void create(
                 );
                 return;
             }
-            TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse);
-            listener.onResponse(
-                new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry, timingStatsReporter)
-            );
+            TimeBasedExtractedFields fields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse);
+            listener.onResponse(new ScrollDataExtractorFactory(client, datafeed, job, fields, xContentRegistry, timingStatsReporter));
         }, e -> {
             Throwable cause = ExceptionsHelper.unwrapCause(e);
             if (cause instanceof IndexNotFoundException) {
@@ -153,7 +153,7 @@ public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, Time
     }
 
     private void createStatsIndexAndUpdateMappingsIfNecessary(
-        Client client,
+        Client clientToUse,
         ClusterState clusterState,
         TimeValue masterNodeTimeout,
         ActionListener<Boolean> listener
@@ -162,15 +162,21 @@ private void createStatsIndexAndUpdateMappingsIfNecessary(
             aBoolean -> ElasticsearchMappings.addDocMappingIfMissing(
                 MlStatsIndex.writeAlias(),
                 MlStatsIndex::wrappedMapping,
-                client,
+                clientToUse,
                 clusterState,
                 masterNodeTimeout,
                 listener
             ),
             listener::onFailure
         );
 
-        MlStatsIndex.createStatsIndexAndAliasIfNecessary(client, clusterState, expressionResolver, masterNodeTimeout, createIndexListener);
+        MlStatsIndex.createStatsIndexAndAliasIfNecessary(
+            clientToUse,
+            clusterState,
+            expressionResolver,
+            masterNodeTimeout,
+            createIndexListener
+        );
     }
 
     private void determineProgressAndResume(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig config) {