Skip to content

Commit

Permalink
Changes to keep Checkstyle happy after reformatting (elastic#76464)
Browse files Browse the repository at this point in the history
* Reformatting to keep Checkstyle happy after reformatting

* Configure spotless everywhere, and disable the tasks if necessary

* Add XContentBuilder helpers, fix test

* Tweaks

* Add a TODO

Co-authored-by: Elastic Machine <[email protected]>
  • Loading branch information
pugnascotia and elasticmachine authored Aug 18, 2021
1 parent 86a5772 commit d01efa4
Show file tree
Hide file tree
Showing 67 changed files with 739 additions and 398 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -199,49 +199,52 @@ def projectPathsToExclude = [

subprojects {
plugins.withType(ElasticsearchJavaPlugin).whenPluginAdded {
if (projectPathsToExclude.contains(project.path) == false) {
project.apply plugin: "com.diffplug.spotless"
project.apply plugin: "com.diffplug.spotless"

spotless {
java {
if (project.path == ':server') {
target 'src/*/java/org/elasticsearch/action/admin/cluster/repositories/**/*.java',
'src/*/java/org/elasticsearch/action/admin/cluster/snapshots/**/*.java',
'src/*/java/org/elasticsearch/index/snapshots/**/*.java',
'src/*/java/org/elasticsearch/repositories/**/*.java',
'src/*/java/org/elasticsearch/snapshots/**/*.java'
spotless {
java {
if (project.path == ':server') {
target 'src/*/java/org/elasticsearch/action/admin/cluster/repositories/**/*.java',
'src/*/java/org/elasticsearch/action/admin/cluster/snapshots/**/*.java',
'src/*/java/org/elasticsearch/index/snapshots/**/*.java',
'src/*/java/org/elasticsearch/repositories/**/*.java',
'src/*/java/org/elasticsearch/snapshots/**/*.java'

targetExclude 'src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java'
} else {
// Normally this isn't necessary, but we have Java sources in
// non-standard places
target 'src/**/*.java'
}
targetExclude 'src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java'
} else {
// Normally this isn't necessary, but we have Java sources in
// non-standard places
target 'src/**/*.java'
}

toggleOffOn('@formatter:off', '@formatter:on') // use `@formatter:off` and `@formatter:on` to toggle formatting - ONLY IF STRICTLY NECESSARY
removeUnusedImports()
importOrderFile rootProject.file('build-tools-internal/elastic.importorder')
eclipse().configFile rootProject.file('build-tools-internal/formatterConfig.xml')
trimTrailingWhitespace()
toggleOffOn('@formatter:off', '@formatter:on') // use `@formatter:off` and `@formatter:on` to toggle formatting - ONLY IF STRICTLY NECESSARY
removeUnusedImports()
importOrderFile rootProject.file('build-tools-internal/elastic.importorder')
eclipse().configFile rootProject.file('build-tools-internal/formatterConfig.xml')
trimTrailingWhitespace()

// Sometimes Spotless will report a "misbehaving rule which can't make up its
// mind" and will recommend enabling the `paddedCell()` setting. If you
// enable this setting and run the format check again,
// Spotless will write files to
// `$PROJECT/build/spotless-diagnose-java/` to aid diagnosis. It writes
// different copies of the formatted files, so that you can see how they
// differ and infer what is the problem.
// Sometimes Spotless will report a "misbehaving rule which can't make up its
// mind" and will recommend enabling the `paddedCell()` setting. If you
// enable this setting and run the format check again,
// Spotless will write files to
// `$PROJECT/build/spotless-diagnose-java/` to aid diagnosis. It writes
// different copies of the formatted files, so that you can see how they
// differ and infer what is the problem.

// The `paddedCell()` option is disabled for normal operation so that any
// misbehaviour is detected, and not just suppressed. You can enable the
// option from the command line by running Gradle with `-Dspotless.paddedcell`.
if (providers.systemProperty('spotless.paddedcell').forUseAtConfigurationTime().isPresent()) {
paddedCell()
}
// The `paddedCell()` option is disabled for normal operation so that any
// misbehaviour is detected, and not just suppressed. You can enable the
// option from the command line by running Gradle with `-Dspotless.paddedcell`.
if (providers.systemProperty('spotless.paddedcell').forUseAtConfigurationTime().isPresent()) {
paddedCell()
}
}
}

tasks.named("precommit").configure { dependsOn 'spotlessJavaCheck' }
if (projectPathsToExclude.contains(project.path)) {
tasks.named('spotlessJavaCheck').configure { enabled = false }
tasks.named('spotlessApply').configure { enabled = false }
}

tasks.named("precommit").configure { dependsOn 'spotlessJavaCheck' }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
import java.io.IOException;
import java.util.List;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

/**
* This class parses the superset of all possible fields that may be written by
* InferenceResults. The warning field is mutually exclusive with all the other fields.
Expand All @@ -29,8 +31,6 @@
* Boolean or a Double. For regression results {@link #getValue()} is always
* a Double.
*/
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

public class ParsedInference extends ParsedAggregation {

@SuppressWarnings("unchecked")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -162,25 +162,31 @@ public void setWeight(final double weight) {
this.weight = weight;
}

// @formatter:off
/**
* If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default)
* this statistic is available.
* @return the number of documents in the index that contain this term (see bg_count in
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html">
* the significant_terms aggregation</a>)
* <a
* href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html"
* >the significant_terms aggregation</a>)
*/
// @formatter:on
/** @return the background count for this term, i.e. how many documents in the index contain it. */
public long getBg() {
    return this.bg;
}

// @formatter:off
/**
* If the {@link GraphExploreRequest#useSignificance(boolean)} is true (the default)
* this statistic is available.
* Together with {@link #getBg()} these numbers are used to derive the significance of a term.
* @return the number of documents in the sample of best matching documents that contain this term (see fg_count in
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html">
* the significant_terms aggregation</a>)
* <a
* href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-aggregations-bucket-significantterms-aggregation.html"
* >the significant_terms aggregation</a>)
*/
// @formatter:on
/** @return the foreground count for this term, i.e. how many of the best-matching sample documents contain it. */
public long getFg() {
    return this.fg;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1269,10 +1269,14 @@ public void testIndexPutSettingNonExistent() throws IOException {
exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest,
highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync));
assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(exception.getMessage(), equalTo(
assertThat(
exception.getMessage(),
equalTo(
"Elasticsearch exception [type=illegal_argument_exception, "
+ "reason=unknown setting [index.no_idea_what_you_are_talking_about] please check that any required plugins are installed, "
+ "or check the breaking changes documentation for removed settings]"));
+ "reason=unknown setting [index.no_idea_what_you_are_talking_about] please check that any required plugins "
+ "are installed, or check the breaking changes documentation for removed settings]"
)
);
}

@SuppressWarnings("unchecked")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,16 @@
import org.elasticsearch.client.ml.job.results.OverallBucket;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.XContentTestUtils;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;
Expand Down Expand Up @@ -138,55 +141,133 @@ private void addCategoriesIndexRequests(BulkRequest bulkRequest) {
}
}

private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) {
private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) throws IOException {
// Index a number of model snapshots, one of which contains the new model_size_stats fields
// 'model_bytes_exceeded' and 'model_bytes_memory_limit' that were introduced in 7.2.0.
// We want to verify that we can parse the snapshots whether or not these fields are present.
{
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587919000, " +
"\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", \"snapshot_id\":\"1541587919\"," +
"\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," +
"\"model_bytes\":51722, \"peak_model_bytes\":61322, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960," +
"\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," +
"\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000," +
" \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000, \"latest_result_time_stamp\":1519930800000," +
" \"retain\":false }", XContentType.JSON);
Map<String, Object> modelSizeStats = new HashMap<>();
modelSizeStats.put("job_id", JOB_ID);
modelSizeStats.put("result_type", "model_size_stats");
modelSizeStats.put("model_bytes", 51722);
modelSizeStats.put("peak_model_bytes", 61322);
modelSizeStats.put("model_bytes_exceeded", 10762);
modelSizeStats.put("model_bytes_memory_limit", 40960);
modelSizeStats.put("total_by_field_count", 3);
modelSizeStats.put("total_over_field_count", 0);
modelSizeStats.put("total_partition_field_count", 2);
modelSizeStats.put("bucket_allocation_failures_count", 0);
modelSizeStats.put("memory_status", "ok");
modelSizeStats.put("log_time", 1541587919000L);
modelSizeStats.put("timestamp", 1519930800000L);

Map<String, Object> source = new HashMap<>();
source.put("job_id", JOB_ID);
source.put("timestamp", 1541587919000L);
source.put("description", "State persisted due to job close at 2018-11-07T10:51:59+0000");
source.put("snapshot_id", "1541587919");
source.put("snapshot_doc_count", 1);
source.put("latest_record_time_stamp", 1519931700000L);
source.put("latest_result_time_stamp", 1519930800000L);
source.put("retain", false);
source.put("model_size_stats", modelSizeStats);

indexRequest.source(XContentTestUtils.convertToXContent(source, XContentType.JSON), XContentType.JSON);
bulkRequest.add(indexRequest);
}
// Also index one that contains 'memory_assignment_basis', which was added in 7.11
{
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587929000, " +
"\"description\":\"State persisted due to job close at 2018-11-07T10:52:09+0000\", \"snapshot_id\":\"1541587929\"," +
"\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," +
"\"model_bytes\":51722, \"peak_model_bytes\":61322, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960," +
"\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," +
"\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"assignment_memory_basis\":\"model_memory_limit\"," +
" \"log_time\":1541587929000, \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000," +
"\"latest_result_time_stamp\":1519930800000, \"retain\":false }", XContentType.JSON);

Map<String, Object> modelSizeStats = new HashMap<>();
modelSizeStats.put("job_id", JOB_ID);
modelSizeStats.put("result_type", "model_size_stats");
modelSizeStats.put("model_bytes", 51722);
modelSizeStats.put("peak_model_bytes", 61322);
modelSizeStats.put("model_bytes_exceeded", 10762);
modelSizeStats.put("model_bytes_memory_limit", 40960);
modelSizeStats.put("total_by_field_count", 3);
modelSizeStats.put("total_over_field_count", 0);
modelSizeStats.put("total_partition_field_count", 2);
modelSizeStats.put("bucket_allocation_failures_count", 0);
modelSizeStats.put("memory_status", "ok");
modelSizeStats.put("assignment_memory_basis", "model_memory_limit");
modelSizeStats.put("log_time", 1541587929000L);
modelSizeStats.put("timestamp", 1519930800000L);

Map<String, Object> source = new HashMap<>();
source.put("job_id", JOB_ID);
source.put("timestamp", 1541587929000L);
source.put("description", "State persisted due to job close at 2018-11-07T10:52:09+0000");
source.put("snapshot_id", "1541587929");
source.put("snapshot_doc_count", 1);
source.put("latest_record_time_stamp", 1519931700000L);
source.put("latest_result_time_stamp", 1519930800000L);
source.put("retain", false);
source.put("model_size_stats", modelSizeStats);

indexRequest.source(XContentTestUtils.convertToXContent(source, XContentType.JSON), XContentType.JSON);
bulkRequest.add(indexRequest);
}
{
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541588919000, " +
"\"description\":\"State persisted due to job close at 2018-11-07T11:08:39+0000\", \"snapshot_id\":\"1541588919\"," +
"\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," +
"\"model_bytes\":51722, \"peak_model_bytes\":61322, \"total_by_field_count\":3, \"total_over_field_count\":0," +
"\"total_partition_field_count\":2,\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\"," +
"\"log_time\":1541588919000,\"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000," +
"\"latest_result_time_stamp\":1519930800000, \"retain\":false }", XContentType.JSON);

Map<String, Object> modelSizeStats = new HashMap<>();
modelSizeStats.put("job_id", JOB_ID);
modelSizeStats.put("result_type", "model_size_stats");
modelSizeStats.put("model_bytes", 51722);
modelSizeStats.put("peak_model_bytes", 61322);
modelSizeStats.put("total_by_field_count", 3);
modelSizeStats.put("total_over_field_count", 0);
modelSizeStats.put("total_partition_field_count", 2);
modelSizeStats.put("bucket_allocation_failures_count", 0);
modelSizeStats.put("memory_status", "ok");
modelSizeStats.put("log_time", 1541588919000L);
modelSizeStats.put("timestamp", 1519930800000L);

Map<String, Object> source = new HashMap<>();
source.put("job_id", JOB_ID);
source.put("timestamp", 1541588919000L);
source.put("description", "State persisted due to job close at 2018-11-07T11:08:39+0000");
source.put("snapshot_id", "1541588919");
source.put("snapshot_doc_count", 1);
source.put("latest_record_time_stamp", 1519931700000L);
source.put("latest_result_time_stamp", 1519930800000L);
source.put("retain", false);
source.put("model_size_stats", modelSizeStats);

indexRequest.source(XContentTestUtils.convertToXContent(source, XContentType.JSON), XContentType.JSON);
bulkRequest.add(indexRequest);
}
{
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541589919000, " +
"\"description\":\"State persisted due to job close at 2018-11-07T11:25:19+0000\", \"snapshot_id\":\"1541589919\"," +
"\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," +
"\"model_bytes\":51722, \"peak_model_bytes\":61322, \"total_by_field_count\":3, \"total_over_field_count\":0," +
"\"total_partition_field_count\":2,\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\"," +
"\"log_time\":1541589919000,\"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000," +
"\"latest_result_time_stamp\":1519930800000, \"retain\":false }", XContentType.JSON);

Map<String, Object> modelSizeStats = new HashMap<>();
modelSizeStats.put("job_id", JOB_ID);
modelSizeStats.put("result_type", "model_size_stats");
modelSizeStats.put("model_bytes", 51722);
modelSizeStats.put("peak_model_bytes", 61322);
modelSizeStats.put("total_by_field_count", 3);
modelSizeStats.put("total_over_field_count", 0);
modelSizeStats.put("total_partition_field_count", 2);
modelSizeStats.put("bucket_allocation_failures_count", 0);
modelSizeStats.put("memory_status", "ok");
modelSizeStats.put("log_time", 1541589919000L);
modelSizeStats.put("timestamp", 1519930800000L);

Map<String, Object> source = new HashMap<>();
source.put("job_id", JOB_ID);
source.put("timestamp", 1541589919000L);
source.put("description", "State persisted due to job close at 2018-11-07T11:25:19+0000");
source.put("snapshot_id", "1541589919");
source.put("snapshot_doc_count", 1);
source.put("latest_record_time_stamp", 1519931700000L);
source.put("latest_result_time_stamp", 1519930800000L);
source.put("retain", false);
source.put("model_size_stats", modelSizeStats);

indexRequest.source(XContentTestUtils.convertToXContent(source, XContentType.JSON), XContentType.JSON);
bulkRequest.add(indexRequest);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,7 @@ public HttpEntity getEntity() {
* Length of RFC 1123 format (with quotes and leading space), used in
* matchWarningHeaderPatternByPrefix(String).
*/
// @formatter:off
private static final int WARNING_HEADER_DATE_LENGTH = 0
+ 1
+ 1
Expand All @@ -131,6 +132,7 @@ public HttpEntity getEntity() {
+ 2 + 1 + 2 + 1 + 2 + 1
+ 3
+ 1;
// @formatter:on

/**
* Tests if a string matches the RFC 7234 specification for warning headers.
Expand Down
Loading

0 comments on commit d01efa4

Please sign in to comment.