From e1fdd00420b226b33adba3675ac0ff5d6daf954c Mon Sep 17 00:00:00 2001
From: Tal Levy
Date: Thu, 25 Oct 2018 21:12:19 -0700
Subject: [PATCH 01/14] Lowercase static final DeprecationLogger instance
 names (#34887)

After discussing on the team's FixItFriday, we concluded that static
final instance variables that are mutable should be lowercased.

Historically, DeprecationLogger was uppercased more frequently than
lowercased.
---
 .../analysis/common/CommonAnalysisPlugin.java        | 10 +++++-----
 .../LegacyDelimitedPayloadTokenFilterFactory.java    |  8 ++++----
 .../percolator/PercolateQueryBuilder.java            |  6 +++---
 .../discovery/ec2/Ec2ClientSettings.java             |  6 +++---
 .../http/TestDeprecatedQueryBuilder.java             |  7 ++++---
 .../template/put/PutIndexTemplateRequest.java        |  4 ++--
 .../cluster/metadata/IndexTemplateMetaData.java      |  4 ++--
 .../org/elasticsearch/common/time/DateUtils.java     |  4 ++--
 .../org/elasticsearch/common/unit/ByteSizeValue.java |  4 ++--
 .../common/xcontent/LoggingDeprecationHandler.java   |  6 +++---
 .../main/java/org/elasticsearch/http/HttpInfo.java   |  4 ++--
 .../elasticsearch/index/mapper/DynamicTemplate.java  |  4 ++--
 .../index/mapper/FieldNamesFieldMapper.java          |  4 ++--
 .../elasticsearch/index/mapper/MapperService.java    |  4 ++--
 .../elasticsearch/index/mapper/TypeFieldMapper.java  |  4 ++--
 .../elasticsearch/index/query/TypeQueryBuilder.java  |  4 ++--
 .../functionscore/RandomScoreFunctionBuilder.java    |  4 ++--
 .../index/similarity/SimilarityProviders.java        |  8 ++++----
 .../index/similarity/SimilarityService.java          | 10 +++++-----
 .../indices/analysis/AnalysisModule.java             |  6 +++---
 .../admin/indices/RestPutIndexTemplateAction.java    |  4 ++--
 .../rest/action/search/RestSearchAction.java         |  4 ++--
 .../script/JodaCompatibleZonedDateTime.java          |  4 ++--
 .../java/org/elasticsearch/script/ParameterMap.java  |  4 ++--
 .../org/elasticsearch/script/ScriptMetaData.java     |  6 +++---
 .../org/elasticsearch/script/StoredScriptSource.java | 12 ++++++------
 .../search/aggregations/InternalOrder.java           |  4 ++--
 .../SignificantTermsAggregatorFactory.java           |  4 ++--
 .../bucket/terms/TermsAggregatorFactory.java         |  4 ++--
 .../search/builder/SearchSourceBuilder.java          |  4 ++--
 .../fetch/subphase/DocValueFieldsFetchSubPhase.java  |  4 ++--
 .../elasticsearch/search/sort/FieldSortBuilder.java  |  6 +++---
 .../search/sort/GeoDistanceSortBuilder.java          |  6 +++---
 .../elasticsearch/search/sort/ScriptSortBuilder.java |  6 +++---
 .../completion/context/GeoContextMapping.java        |  6 +++---
 .../org/elasticsearch/xpack/ml/job/JobManager.java   |  8 ++++----
 .../xpack/ml/utils/DomainSplitFunction.java          |  4 ++--
 37 files changed, 101 insertions(+), 100 deletions(-)

diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
index 59ecde8cf37ce..aaca4f9b1860f 100644
--- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java
@@ -19,6 +19,7 @@

 package org.elasticsearch.analysis.common;

+import org.apache.logging.log4j.LogManager;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -115,7 +116,6 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.Environment;
@@ -151,7 +151,7 @@
 public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin {

-    private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(CommonAnalysisPlugin.class));
+    private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(CommonAnalysisPlugin.class));

     private final SetOnce<ScriptService> scriptService = new SetOnce<>();
@@ -376,7 +376,7 @@ public List<PreConfiguredCharFilter> getPreConfiguredCharFilters() {
         filters.add(PreConfiguredCharFilter.singleton("html_strip", false, HTMLStripCharFilter::new));
         filters.add(PreConfiguredCharFilter.singletonWithVersion("htmlStrip", false, (reader, version) -> {
             if (version.onOrAfter(org.elasticsearch.Version.V_6_3_0)) {
-                DEPRECATION_LOGGER.deprecatedAndMaybeLog("htmlStrip_deprecation",
+                deprecationLogger.deprecatedAndMaybeLog("htmlStrip_deprecation",
                     "The [htmlStrip] char filter name is deprecated and will be removed in a future version. " +
                         "Please change the filter name to [html_strip] instead.");
             }
@@ -414,7 +414,7 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
                 new EdgeNGramTokenFilter(input, 1)));
         filters.add(PreConfiguredTokenFilter.singletonWithVersion("edgeNGram", false, (reader, version) -> {
             if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
-                DEPRECATION_LOGGER.deprecatedAndMaybeLog("edgeNGram_deprecation",
+                deprecationLogger.deprecatedAndMaybeLog("edgeNGram_deprecation",
                     "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " +
                         "Please change the filter name to [edge_ngram] instead.");
             }
@@ -438,7 +438,7 @@ public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
         filters.add(PreConfiguredTokenFilter.singleton("ngram", false, reader -> new NGramTokenFilter(reader, 1, 2, false)));
         filters.add(PreConfiguredTokenFilter.singletonWithVersion("nGram", false, (reader, version) -> {
             if (version.onOrAfter(org.elasticsearch.Version.V_6_4_0)) {
-                DEPRECATION_LOGGER.deprecatedAndMaybeLog("nGram_deprecation",
+                deprecationLogger.deprecatedAndMaybeLog("nGram_deprecation",
                     "The [nGram] token filter name is deprecated and will be removed in a future version. 
" + "Please change the filter name to [ngram] instead."); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java index 06c179d95f7af..051c5bf80c524 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java @@ -19,16 +19,16 @@ package org.elasticsearch.analysis.common; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTokenFilterFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class)); + private static final DeprecationLogger deprecationLogger = + new DeprecationLogger(LogManager.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class)); LegacyDelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); @@ -37,7 +37,7 @@ public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTo "[delimited_payload_filter] is not supported for new indices, use [delimited_payload] instead"); } if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_2_0)) { - DEPRECATION_LOGGER.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"); + deprecationLogger.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"); } } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 09cc04458ec70..3c0076ea18f9e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.percolator; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.index.BinaryDocValues; @@ -54,7 +55,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -95,7 +95,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "percolate"; - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ParseField.class)); static final 
ParseField DOCUMENT_FIELD = new ParseField("document"); static final ParseField DOCUMENTS_FIELD = new ParseField("documents"); @@ -577,7 +577,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { final MapperService mapperService = context.getMapperService(); String type = mapperService.documentMapper().type(); if (documentType != null) { - DEPRECATION_LOGGER.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); + deprecationLogger.deprecated("[document_type] parameter has been deprecated because types have been deprecated"); if (documentType.equals(type) == false) { throw new IllegalArgumentException("specified document_type [" + documentType + "] is not equal to the actual type [" + type + "]"); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index d76c9e820b8b1..084814e13d873 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -77,7 +77,7 @@ final class Ec2ClientSettings { private static final Logger logger = Loggers.getLogger(Ec2ClientSettings.class); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); /** Credentials to authenticate with ec2. */ final AWSCredentials credentials; @@ -135,11 +135,11 @@ static AWSCredentials loadCredentials(Settings settings) { return null; } else { if (key.length() == 0) { - DEPRECATION_LOGGER.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", + deprecationLogger.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", SECRET_KEY_SETTING.getKey(), ACCESS_KEY_SETTING.getKey()); } if (secret.length() == 0) { - DEPRECATION_LOGGER.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", + deprecationLogger.deprecated("Setting [{}] is set but [{}] is not, which will be unsupported in future", ACCESS_KEY_SETTING.getKey(), SECRET_KEY_SETTING.getKey()); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecatedQueryBuilder.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecatedQueryBuilder.java index 5966ae08ac4e4..7d8115a20c9e9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecatedQueryBuilder.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecatedQueryBuilder.java @@ -19,12 +19,12 @@ package org.elasticsearch.http; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -41,7 +41,8 @@ public class TestDeprecatedQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "deprecated_match_all"; - private static final DeprecationLogger DEPRECATION_LOGGER = new 
DeprecationLogger(Loggers.getLogger(TestDeprecatedQueryBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(TestDeprecatedQueryBuilder.class)); public TestDeprecatedQueryBuilder() { // nothing to do @@ -79,7 +80,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { - DEPRECATION_LOGGER.deprecated("[{}] query is deprecated, but used on [{}] index", NAME, context.index().getName()); + deprecationLogger.deprecated("[{}] query is deprecated, but used on [{}] index", NAME, context.index().getName()); return Queries.newMatchAllQuery(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 5b1b3dd2158ed..ce82d277dbbd7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -69,7 +69,7 @@ */ public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContent { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(PutIndexTemplateRequest.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(PutIndexTemplateRequest.class)); private String name; @@ -313,7 +313,7 @@ public PutIndexTemplateRequest source(Map templateSource) { if (name.equals("template")) { // This is needed to allow for bwc (beats, logstash) with pre-5.0 templates (#21009) if(entry.getValue() instanceof String) { - DEPRECATION_LOGGER.deprecated("Deprecated field [template] used, replaced by [index_patterns]"); + deprecationLogger.deprecated("Deprecated field [template] used, replaced by [index_patterns]"); patterns(Collections.singletonList((String) entry.getValue())); } } else if (name.equals("index_patterns")) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 608e89514f25b..b7bb2656014ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -53,7 +53,7 @@ public class IndexTemplateMetaData extends AbstractDiffable { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(IndexTemplateMetaData.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(IndexTemplateMetaData.class)); private final String name; @@ -451,7 +451,7 @@ public static IndexTemplateMetaData fromXContent(XContentParser parser, String t } else if (token.isValue()) { // Prior to 5.1.0, elasticsearch only supported a single index pattern called `template` (#21009) if("template".equals(currentFieldName)) { - DEPRECATION_LOGGER.deprecated("Deprecated field [template] used, replaced by [index_patterns]"); + deprecationLogger.deprecated("Deprecated field [template] used, replaced by [index_patterns]"); builder.patterns(Collections.singletonList(parser.text())); } else if ("order".equals(currentFieldName)) { builder.order(parser.intValue()); diff --git 
a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index ed04321ee83c5..5e8fae6092d42 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -41,7 +41,7 @@ public static DateTimeZone zoneIdToDateTimeZone(ZoneId zoneId) { return DateTimeZone.forID(zoneId.getId()); } - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(DateFormatters.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(DateFormatters.class)); // pkg private for tests static final Map DEPRECATED_SHORT_TIMEZONES; static { @@ -61,7 +61,7 @@ public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) { String deprecatedId = DEPRECATED_SHORT_TIMEZONES.get(timeZone.getID()); if (deprecatedId != null) { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("timezone", + deprecationLogger.deprecatedAndMaybeLog("timezone", "Use of short timezone id " + timeZone.getID() + " is deprecated. Use " + deprecatedId + " instead"); return ZoneId.of(deprecatedId); } diff --git a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 0358f8f318de0..5d2bb928ada6a 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/server/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -36,7 +36,7 @@ public class ByteSizeValue implements Writeable, Comparable, ToXContentFragment { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ByteSizeValue.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ByteSizeValue.class)); public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES); @@ -237,7 +237,7 @@ private static ByteSizeValue parse(final String initialInput, final String norma } catch (final NumberFormatException e) { try { final double doubleValue = Double.parseDouble(s); - DEPRECATION_LOGGER.deprecated( + deprecationLogger.deprecated( "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [{}] found for setting [{}]", initialInput, settingName); return new ByteSizeValue((long) (doubleValue * unit.toBytes(1))); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java index 5b92dec573df8..097093ccce5a2 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/LoggingDeprecationHandler.java @@ -42,7 +42,7 @@ public class LoggingDeprecationHandler implements DeprecationHandler { * Changing that will require some research to make super duper * sure it is safe. 
*/ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ParseField.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ParseField.class)); private LoggingDeprecationHandler() { // Singleton @@ -50,11 +50,11 @@ private LoggingDeprecationHandler() { @Override public void usedDeprecatedName(String usedName, String modernName) { - DEPRECATION_LOGGER.deprecated("Deprecated field [{}] used, expected [{}] instead", usedName, modernName); + deprecationLogger.deprecated("Deprecated field [{}] used, expected [{}] instead", usedName, modernName); } @Override public void usedDeprecatedField(String usedName, String replacedWith) { - DEPRECATION_LOGGER.deprecated("Deprecated field [{}] used, replaced by [{}]", usedName, replacedWith); + deprecationLogger.deprecated("Deprecated field [{}] used, replaced by [{}]", usedName, replacedWith); } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index 22bcd31850d29..a24f508edc113 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -37,7 +37,7 @@ public class HttpInfo implements Writeable, ToXContentFragment { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(HttpInfo.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(HttpInfo.class)); /** Whether to add hostname to publish host field when serializing. */ private static final boolean CNAME_IN_PUBLISH_HOST = @@ -86,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (cnameInPublishHost) { publishAddressString = hostString + '/' + publishAddress.toString(); } else { - DEPRECATION_LOGGER.deprecated( + deprecationLogger.deprecated( "[http.publish_host] was printed as [ip:port] instead of [hostname/ip:port]. " + "This format is deprecated and will change to [hostname/ip:port] in a future version. " + "Use -Des.http.cname_in_publish_address=true to enforce non-deprecated formatting." 
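
The hunks in this patch all apply the same mechanical rename. For reference, here is a minimal sketch of the post-patch convention, assuming only the 6.x DeprecationLogger API visible in these diffs; the class name ExampleComponent, the key "example_key", and the setting name [legacy_option] are hypothetical placeholders, not code from this patch:

    import org.apache.logging.log4j.LogManager;
    import org.elasticsearch.common.logging.DeprecationLogger;

    public class ExampleComponent {

        // Lowercased name: the logger is mutable state rather than a true constant,
        // and it is now obtained via LogManager instead of the internal Loggers helper.
        private static final DeprecationLogger deprecationLogger =
            new DeprecationLogger(LogManager.getLogger(ExampleComponent.class));

        void useLegacyOption() {
            // Same call shape as the deprecatedAndMaybeLog(key, message) usages above.
            deprecationLogger.deprecatedAndMaybeLog("example_key",
                "The [legacy_option] setting is deprecated and will be removed in a future version.");
        }
    }

Only the field name and the logger factory change; the deprecated(...) and deprecatedAndMaybeLog(...) call sites themselves are otherwise untouched throughout the patch.
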
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java index 1b81977a57205..939736a0a893d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicTemplate.java @@ -36,7 +36,7 @@ public class DynamicTemplate implements ToXContentObject { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(DynamicTemplate.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(DynamicTemplate.class)); public enum MatchType { SIMPLE { @@ -208,7 +208,7 @@ public static DynamicTemplate parse(String name, Map conf, if (indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1)) { throw e; } else { - DEPRECATION_LOGGER.deprecated("match_mapping_type [" + matchMappingType + "] is invalid and will be ignored: " + deprecationLogger.deprecated("match_mapping_type [" + matchMappingType + "] is invalid and will be ignored: " + e.getMessage()); // this template is on an unknown type so it will never match anything // null indicates that the template should be ignored diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index e6ec6e446bffd..fb2dbea95e895 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -48,7 +48,7 @@ */ public class FieldNamesFieldMapper extends MetadataFieldMapper { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(FieldNamesFieldMapper.class)); public static final String NAME = "_field_names"; @@ -184,7 +184,7 @@ public Query termQuery(Object value, QueryShardContext context) { if (isEnabled() == false) { throw new IllegalStateException("Cannot run [exists] queries if the [_field_names] field is disabled"); } - DEPRECATION_LOGGER.deprecated( + deprecationLogger.deprecated( "terms query on the _field_names field is deprecated and will be removed, use exists query instead"); return super.termQuery(value, context); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 1bda015758736..7b9205881df62 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -117,7 +117,7 @@ public enum MergeReason { "_size", "_timestamp", "_ttl", IgnoredFieldMapper.NAME ); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(MapperService.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(MapperService.class)); private final IndexAnalyzers indexAnalyzers; @@ -408,7 +408,7 @@ private synchronized Map internalMerge(@Nullable Documen throw new IllegalArgumentException("The [default] mapping cannot be updated on index [" + index().getName() + "]: defaults mappings are not useful anymore now that indices can have at most one type."); } else if (reason == MergeReason.MAPPING_UPDATE) { // only log in case of explicit mapping 
updates - DEPRECATION_LOGGER.deprecated("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + + deprecationLogger.deprecated("[_default_] mapping is deprecated since it is not useful anymore now that indexes " + "cannot have more than one type"); } assert defaultMapper.type().equals(DEFAULT_MAPPING); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java index d6d453dbb2b5a..6f7c1b206178c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeFieldMapper.java @@ -92,7 +92,7 @@ public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext c static final class TypeFieldType extends StringFieldType { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(TypeFieldType.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeFieldType.class)); TypeFieldType() { } @@ -160,7 +160,7 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("range_single_type", + deprecationLogger.deprecatedAndMaybeLog("range_single_type", "Running [range] query on [_type] field for an index with a single type. As types are deprecated, this functionality will be removed in future releases."); Query result = new MatchAllDocsQuery(); String type = context.getMapperService().documentMapper().type(); diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java index cb8005ad26c38..92556782d4eee 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TypeQueryBuilder.java @@ -40,7 +40,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { public static final String NAME = "type"; private static final ParseField VALUE_FIELD = new ParseField("value"); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(TypeQueryBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TypeQueryBuilder.class)); private final String type; @@ -127,7 +127,7 @@ public String getWriteableName() { @Override protected Query doToQuery(QueryShardContext context) throws IOException { - DEPRECATION_LOGGER.deprecated("The [type] query is deprecated, filter on a field instead."); + deprecationLogger.deprecated("The [type] query is deprecated, filter on a field instead."); //LUCENE 4 UPGRADE document mapper should use bytesref as well? 
DocumentMapper documentMapper = context.getMapperService().documentMapper(type); if (documentMapper == null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java index b5bdc05adfb73..f5bdd0316ce40 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/RandomScoreFunctionBuilder.java @@ -40,7 +40,7 @@ */ public class RandomScoreFunctionBuilder extends ScoreFunctionBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(RandomScoreFunctionBuilder.class)); public static final String NAME = "random_score"; @@ -168,7 +168,7 @@ protected ScoreFunction doToFunction(QueryShardContext context) { if (field != null) { fieldType = context.getMapperService().fullName(field); } else { - DEPRECATION_LOGGER.deprecated( + deprecationLogger.deprecated( "As of version 7.0 Elasticsearch will require that a [field] parameter is provided when a [seed] is set"); fieldType = context.getMapperService().fullName(IdFieldMapper.NAME); } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index 2de877551a96f..aba860337f3df 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -67,7 +67,7 @@ final class SimilarityProviders { private SimilarityProviders() {} // no instantiation - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(SimilarityProviders.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(SimilarityProviders.class)); static final String DISCOUNT_OVERLAPS = "discount_overlaps"; private static final Map BASIC_MODELS; @@ -143,7 +143,7 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings throw new IllegalArgumentException("Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model."); } else { - DEPRECATION_LOGGER.deprecated("Basic model [" + basicModel + + deprecationLogger.deprecated("Basic model [" + basicModel + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); model = BASIC_MODELS.get(replacement); assert model != null; @@ -174,7 +174,7 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting throw new IllegalArgumentException("After effect [" + afterEffect + "] isn't supported anymore, please use another effect."); } else { - DEPRECATION_LOGGER.deprecated("After effect [" + afterEffect + + deprecationLogger.deprecated("After effect [" + afterEffect + "] isn't supported anymore and has arbitrarily been replaced with [" + replacement + "]."); effect = AFTER_EFFECTS.get(replacement); assert effect != null; @@ -264,7 +264,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett if (version.onOrAfter(Version.V_7_0_0_alpha1)) { throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } else { - 
DEPRECATION_LOGGER.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + deprecationLogger.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } } } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 552ef3c4aae8f..d7308c424be8e 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -51,7 +51,7 @@ public final class SimilarityService extends AbstractIndexComponent { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(SimilarityService.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; private static final String CLASSIC_SIMILARITY = "classic"; private static final Map>> DEFAULTS; @@ -67,7 +67,7 @@ public final class SimilarityService extends AbstractIndexComponent { } else { final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); return () -> { - DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + deprecationLogger.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + "instead."); return similarity; @@ -90,7 +90,7 @@ public final class SimilarityService extends AbstractIndexComponent { throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + "similarity or build a custom [scripted] similarity instead."); } else { - DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + deprecationLogger.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + "instead."); return SimilarityProviders.createClassicSimilarity(settings, version); @@ -154,7 +154,7 @@ public SimilarityService(IndexSettings indexSettings, ScriptService scriptServic defaultSimilarity = (providers.get("default") != null) ? 
providers.get("default").get() : providers.get(SimilarityService.DEFAULT_SIMILARITY).get(); if (providers.get("base") != null) { - DEPRECATION_LOGGER.deprecated("The [base] similarity is ignored since query normalization and coords have been removed"); + deprecationLogger.deprecated("The [base] similarity is ignored since query normalization and coords have been removed"); } } @@ -270,7 +270,7 @@ private static void fail(Version indexCreatedVersion, String message) { if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { throw new IllegalArgumentException(message); } else if (indexCreatedVersion.onOrAfter(Version.V_6_5_0)) { - DEPRECATION_LOGGER.deprecated(message); + deprecationLogger.deprecated(message); } } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index a22ada87d772c..43c61094ca7e4 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -71,7 +71,7 @@ public final class AnalysisModule { private static final IndexSettings NA_INDEX_SETTINGS; - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(AnalysisModule.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(AnalysisModule.class)); private final HunspellService hunspellService; private final AnalysisRegistry analysisRegistry; @@ -125,7 +125,7 @@ private NamedRegistry> setupTokenFilters(Li @Override public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { if (indexSettings.getIndexVersionCreated().before(Version.V_7_0_0_alpha1)) { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + deprecationLogger.deprecatedAndMaybeLog("standard_deprecation", "The [standard] token filter name is deprecated and will be removed in a future version."); } else { throw new IllegalArgumentException("The [standard] token filter has been removed."); @@ -183,7 +183,7 @@ static Map setupPreConfiguredTokenFilters(List preConfiguredTokenFilters.register( "standard", PreConfiguredTokenFilter.singletonWithVersion("standard", true, (reader, version) -> { if (version.before(Version.V_7_0_0_alpha1)) { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("standard_deprecation", + deprecationLogger.deprecatedAndMaybeLog("standard_deprecation", "The [standard] token filter is deprecated and will be removed in a future version."); } else { throw new IllegalArgumentException("The [standard] token filter has been removed."); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index 798a2cbe30fa6..258bb05a7d66c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -36,7 +36,7 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(RestPutIndexTemplateAction.class)); public RestPutIndexTemplateAction(Settings settings, RestController 
controller) { @@ -54,7 +54,7 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); if (request.hasParam("template")) { - DEPRECATION_LOGGER.deprecated("Deprecated parameter[template] used, replaced by [index_patterns]"); + deprecationLogger.deprecated("Deprecated parameter[template] used, replaced by [index_patterns]"); putRequest.patterns(Collections.singletonList(request.param("template"))); } else { putRequest.patterns(Arrays.asList(request.paramAsStringArray("index_patterns", Strings.EMPTY_ARRAY))); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index b677abcd79cfa..3efa9e633de30 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -57,7 +57,7 @@ public class RestSearchAction extends BaseRestHandler { public static final String TYPED_KEYS_PARAM = "typed_keys"; private static final Set RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM); - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestSearchAction.class)); public RestSearchAction(Settings settings, RestController controller) { super(settings); @@ -157,7 +157,7 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the search API with the " + "[{index}/_search] endpoint."); } - DEPRECATION_LOGGER.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); + deprecationLogger.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); } searchRequest.types(Strings.splitStringByCommaToArray(types)); searchRequest.routing(request.param("routing")); diff --git a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java index f6659e8041e35..33b93cd6fd0c0 100644 --- a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java +++ b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java @@ -47,13 +47,13 @@ * A wrapper around ZonedDateTime that exposes joda methods for backcompat. */ public class JodaCompatibleZonedDateTime { - private static final DeprecationLogger DEPRECATION_LOGGER = + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(JodaCompatibleZonedDateTime.class)); private static void logDeprecated(String key, String message, Object... 
params) { // NOTE: we don't check SpecialPermission because this will be called (indirectly) from scripts AccessController.doPrivileged((PrivilegedAction) () -> { - DEPRECATION_LOGGER.deprecatedAndMaybeLog(key, message, params); + deprecationLogger.deprecatedAndMaybeLog(key, message, params); return null; }); } diff --git a/server/src/main/java/org/elasticsearch/script/ParameterMap.java b/server/src/main/java/org/elasticsearch/script/ParameterMap.java index b59d057d66e62..b40c0f9b401d3 100644 --- a/server/src/main/java/org/elasticsearch/script/ParameterMap.java +++ b/server/src/main/java/org/elasticsearch/script/ParameterMap.java @@ -28,7 +28,7 @@ public final class ParameterMap implements Map { - private static final DeprecationLogger DEPRECATION_LOGGER = + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ParameterMap.class)); private final Map params; @@ -64,7 +64,7 @@ public boolean containsValue(final Object value) { public Object get(final Object key) { String deprecationMessage = deprecations.get(key); if (deprecationMessage != null) { - DEPRECATION_LOGGER.deprecated(deprecationMessage); + deprecationLogger.deprecated(deprecationMessage); } return params.get(key); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index 1ce88f7c711e6..2d8e7f5ed6b92 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -51,7 +51,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont /** * Standard deprecation logger for used to deprecate allowance of empty templates. */ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ScriptMetaData.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ScriptMetaData.class)); /** * A builder used to modify the currently stored scripts data held within @@ -219,9 +219,9 @@ public static ScriptMetaData fromXContent(XContentParser parser) throws IOExcept if (source.getSource().isEmpty()) { if (source.getLang().equals(Script.DEFAULT_TEMPLATE_LANG)) { - DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + deprecationLogger.deprecated("empty templates should no longer be used"); } else { - DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + deprecationLogger.deprecated("empty scripts should no longer be used"); } } } diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 7a16c7ad2d51f..b141504adba90 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -59,7 +59,7 @@ public class StoredScriptSource extends AbstractDiffable imp /** * Standard deprecation logger for used to deprecate allowance of empty templates. */ - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(StoredScriptSource.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(StoredScriptSource.class)); /** * Standard {@link ParseField} for outer level of stored script source. 
@@ -145,9 +145,9 @@ private StoredScriptSource build(boolean ignoreEmpty) { if (source == null) { if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { - DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + deprecationLogger.deprecated("empty templates should no longer be used"); } else { - DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + deprecationLogger.deprecated("empty scripts should no longer be used"); } } else { throw new IllegalArgumentException("must specify source for stored script"); @@ -155,9 +155,9 @@ private StoredScriptSource build(boolean ignoreEmpty) { } else if (source.isEmpty()) { if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { - DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + deprecationLogger.deprecated("empty templates should no longer be used"); } else { - DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + deprecationLogger.deprecated("empty scripts should no longer be used"); } } else { throw new IllegalArgumentException("source cannot be empty"); @@ -257,7 +257,7 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.END_OBJECT) { - DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + deprecationLogger.deprecated("empty templates should no longer be used"); return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java index caea05f30e5b4..942cd91572e4d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalOrder.java @@ -527,7 +527,7 @@ public static void writeHistogramOrder(BucketOrder order, StreamOutput out, bool */ public static class Parser { - private static final DeprecationLogger DEPRECATION_LOGGER = + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(Parser.class)); /** @@ -565,7 +565,7 @@ public static BucketOrder parseOrderParam(XContentParser parser) throws IOExcept } // _term and _time order deprecated in 6.0; replaced by _key if ("_term".equals(orderKey) || "_time".equals(orderKey)) { - DEPRECATION_LOGGER.deprecated("Deprecated aggregation order key [{}] used, replaced by [_key]", orderKey); + deprecationLogger.deprecated("Deprecated aggregation order key [{}] used, replaced by [_key]", orderKey); } switch (orderKey) { case "_term": diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java index 01777292613f9..3a424b0055f7a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java @@ -60,7 +60,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory implements Releasable { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + 
private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(SignificantTermsAggregatorFactory.class)); private final IncludeExclude includeExclude; @@ -202,7 +202,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare if (valuesSource instanceof ValuesSource.Bytes) { ExecutionMode execution = null; if (executionHint != null) { - execution = ExecutionMode.fromString(executionHint, DEPRECATION_LOGGER); + execution = ExecutionMode.fromString(executionHint, deprecationLogger); } if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) { execution = ExecutionMode.MAP; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java index 1b5eaee639e1b..2864ffe2fcefc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java @@ -47,7 +47,7 @@ import java.util.Map; public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(TermsAggregatorFactory.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(TermsAggregatorFactory.class)); static Boolean REMAP_GLOBAL_ORDS, COLLECT_SEGMENT_ORDS; @@ -128,7 +128,7 @@ protected Aggregator doCreateInternal(ValuesSource valuesSource, Aggregator pare if (valuesSource instanceof ValuesSource.Bytes) { ExecutionMode execution = null; if (executionHint != null) { - execution = ExecutionMode.fromString(executionHint, DEPRECATION_LOGGER); + execution = ExecutionMode.fromString(executionHint, deprecationLogger); } // In some cases, using ordinals is just not supported: override it if (valuesSource instanceof ValuesSource.Bytes.WithOrdinals == false) { diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 60767bbe3719b..a199ce3a37776 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -77,7 +77,7 @@ * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder) */ public final class SearchSourceBuilder implements Writeable, ToXContentObject, Rewriteable { - private static final DeprecationLogger DEPRECATION_LOGGER = + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(SearchSourceBuilder.class)); public static final ParseField FROM_FIELD = new ParseField("from"); @@ -1052,7 +1052,7 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th scriptFields.add(new ScriptField(parser)); } } else if (INDICES_BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - DEPRECATION_LOGGER.deprecated( + deprecationLogger.deprecated( "Object format in indices_boost is deprecated, please use array format instead"); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java 
b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 0819dfd74dfaf..eae3188d865b8 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -55,7 +55,7 @@ */ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(DocValueFieldsFetchSubPhase.class)); @Override @@ -82,7 +82,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept List noFormatFields = context.docValueFieldsContext().fields().stream().filter(f -> f.format == null).map(f -> f.field) .collect(Collectors.toList()); if (noFormatFields.isEmpty() == false) { - DEPRECATION_LOGGER.deprecated("There are doc-value fields which are not using a format. The output will " + deprecationLogger.deprecated("There are doc-value fields which are not using a format. The output will " + "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " + "[format={}] with a doc value field in order to opt in for the future behaviour and ease the migration to " + "7.0: {}", DocValueFieldsContext.USE_DEFAULT_FORMAT, noFormatFields); diff --git a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java index c4e33fa091bd3..6d58917d84b8f 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java @@ -50,7 +50,7 @@ * A sort builder to sort based on a document field. 
*/ public class FieldSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(FieldSortBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(FieldSortBuilder.class)); public static final String NAME = "field_sort"; public static final ParseField MISSING = new ParseField("missing"); @@ -402,14 +402,14 @@ public static FieldSortBuilder fromXContent(XContentParser parser, String fieldN static { PARSER.declareField(FieldSortBuilder::missing, p -> p.objectText(), MISSING, ValueType.VALUE); PARSER.declareString((fieldSortBuilder, nestedPath) -> { - DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favor of the [nested] parameter"); + deprecationLogger.deprecated("[nested_path] has been deprecated in favor of the [nested] parameter"); fieldSortBuilder.setNestedPath(nestedPath); }, NESTED_PATH_FIELD); PARSER.declareString(FieldSortBuilder::unmappedType , UNMAPPED_TYPE); PARSER.declareString((b, v) -> b.order(SortOrder.fromString(v)) , ORDER_FIELD); PARSER.declareString((b, v) -> b.sortMode(SortMode.fromString(v)), SORT_MODE); PARSER.declareObject(FieldSortBuilder::setNestedFilter, (p, c) -> { - DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour for the [nested] parameter"); + deprecationLogger.deprecated("[nested_filter] has been deprecated in favour for the [nested] parameter"); return SortBuilder.parseNestedFilter(p); }, NESTED_FILTER_FIELD); PARSER.declareObject(FieldSortBuilder::setNestedSort, (p, c) -> NestedSortBuilder.fromXContent(p), NESTED_FIELD); diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index 07af9ffb10c69..7774456b51e15 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -72,7 +72,7 @@ * A geo distance based sorting on a geo point like field. 
*/ public class GeoDistanceSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoDistanceSortBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(GeoDistanceSortBuilder.class)); public static final String NAME = "_geo_distance"; public static final String ALTERNATIVE_NAME = "_geoDistance"; @@ -502,7 +502,7 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String fieldName = currentName; } else if (token == XContentParser.Token.START_OBJECT) { if (NESTED_FILTER_FIELD.match(currentName, parser.getDeprecationHandler())) { - DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour of the [nested] parameter"); + deprecationLogger.deprecated("[nested_filter] has been deprecated in favour of the [nested] parameter"); nestedFilter = parseInnerQueryBuilder(parser); } else if (NESTED_FIELD.match(currentName, parser.getDeprecationHandler())) { nestedSort = NestedSortBuilder.fromXContent(parser); @@ -532,7 +532,7 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String } else if (SORTMODE_FIELD.match(currentName, parser.getDeprecationHandler())) { sortMode = SortMode.fromString(parser.text()); } else if (NESTED_PATH_FIELD.match(currentName, parser.getDeprecationHandler())) { - DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favour of the [nested] parameter"); + deprecationLogger.deprecated("[nested_path] has been deprecated in favour of the [nested] parameter"); nestedPath = parser.text(); } else if (IGNORE_UNMAPPED.match(currentName, parser.getDeprecationHandler())) { ignoreUnmapped = parser.booleanValue(); diff --git a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java index 427d262ba9bb7..8d5690d85837c 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/ScriptSortBuilder.java @@ -66,7 +66,7 @@ * Script sort builder allows to sort based on a custom script expression. 
*/ public class ScriptSortBuilder extends SortBuilder { - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(ScriptSortBuilder.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ScriptSortBuilder.class)); public static final String NAME = "_script"; public static final ParseField TYPE_FIELD = new ParseField("type"); @@ -279,11 +279,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params builderParams) PARSER.declareString((b, v) -> b.order(SortOrder.fromString(v)), ORDER_FIELD); PARSER.declareString((b, v) -> b.sortMode(SortMode.fromString(v)), SORTMODE_FIELD); PARSER.declareString((fieldSortBuilder, nestedPath) -> { - DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favor of the [nested] parameter"); + deprecationLogger.deprecated("[nested_path] has been deprecated in favor of the [nested] parameter"); fieldSortBuilder.setNestedPath(nestedPath); }, NESTED_PATH_FIELD); PARSER.declareObject(ScriptSortBuilder::setNestedFilter, (p, c) -> { - DEPRECATION_LOGGER.deprecated("[nested_filter] has been deprecated in favour for the [nested] parameter"); + deprecationLogger.deprecated("[nested_filter] has been deprecated in favour for the [nested] parameter"); return SortBuilder.parseNestedFilter(p); }, NESTED_FILTER_FIELD); PARSER.declareObject(ScriptSortBuilder::setNestedSort, (p, c) -> NestedSortBuilder.fromXContent(p), NESTED_FIELD); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index 938c4963620ed..ae9cf6fc8c2f9 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -75,7 +75,7 @@ public class GeoContextMapping extends ContextMapping { static final String CONTEXT_PRECISION = "precision"; static final String CONTEXT_NEIGHBOURS = "neighbours"; - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(GeoContextMapping.class)); private final int precision; private final String fieldName; @@ -293,7 +293,7 @@ protected void validateReferences(Version indexVersionCreated, Function parts, int publ public static List domainSplit(String host, Map params) { // NOTE: we don't check SpecialPermission because this will be called (indirectly) from scripts AccessController.doPrivileged((PrivilegedAction) () -> { - DEPRECATION_LOGGER.deprecatedAndMaybeLog("domainSplit", + deprecationLogger.deprecatedAndMaybeLog("domainSplit", "Method [domainSplit] taking params is deprecated. 
Remove the params argument."); return null; }); From be8ad674cf92c43c50720622b94e85e12b9d3683 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Fri, 26 Oct 2018 15:28:52 +1100 Subject: [PATCH 02/14] [TEST] HLRC: Expand failure messages in API checks (#34838) For the assertions in the "testApiNamingConventions" test that check the API contract, this change adds details of the method that was being checked, and the intent of the assertion (the API contract) --- .../client/RestHighLevelClientTests.java | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 9535043e395d1..8f4ec4cc0ccca 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -712,41 +712,49 @@ public void testApiNamingConventions() throws Exception { assertTrue("method [" + apiName + "] is not final", Modifier.isFinal(method.getClass().getModifiers()) || Modifier.isFinal(method.getModifiers())); - assertTrue(Modifier.isPublic(method.getModifiers())); + assertTrue("method [" + method + "] should be public", Modifier.isPublic(method.getModifiers())); //we convert all the method names to snake case, hence we need to look for the '_async' suffix rather than 'Async' if (apiName.endsWith("_async")) { assertTrue("async method [" + method.getName() + "] doesn't have corresponding sync method", methods.containsKey(apiName.substring(0, apiName.length() - 6))); - assertThat(method.getReturnType(), equalTo(Void.TYPE)); - assertEquals(0, method.getExceptionTypes().length); + assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE)); + assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); if (apiName.equals("security.get_ssl_certificates_async")) { assertEquals(2, method.getParameterTypes().length); assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); } else { - assertEquals(3, method.getParameterTypes().length); - assertThat(method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); - assertThat(method.getParameterTypes()[1], equalTo(RequestOptions.class)); - assertThat(method.getParameterTypes()[2], equalTo(ActionListener.class)); + assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length); + assertThat("the first parameter to async method [" + method + "] should be a request type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + assertThat("the third parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[2], equalTo(ActionListener.class)); } } else { //A few methods return a boolean rather than a response object if (apiName.equals("ping") || apiName.contains("exist")) { - assertThat(method.getReturnType().getSimpleName(), equalTo("boolean")); + assertThat("the return type for method [" + method + "] is incorrect", + method.getReturnType().getSimpleName(), equalTo("boolean")); } else { - 
assertThat(method.getReturnType().getSimpleName(), endsWith("Response")); + assertThat("the return type for method [" + method + "] is incorrect", + method.getReturnType().getSimpleName(), endsWith("Response")); } - assertEquals(1, method.getExceptionTypes().length); + assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); //a few methods don't accept a request object as argument if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates")) { - assertEquals(1, method.getParameterTypes().length); - assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); + assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); + assertThat("the parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0], equalTo(RequestOptions.class)); } else { - assertEquals(apiName, 2, method.getParameterTypes().length); - assertThat(method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); - assertThat(method.getParameterTypes()[1], equalTo(RequestOptions.class)); + assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length); + assertThat("the first parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); } boolean remove = apiSpec.remove(apiName); From 734088673ed84fcb6a6958bfa880cc2731deb881 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Fri, 26 Oct 2018 07:45:02 +0200 Subject: [PATCH 03/14] [ML] Include message in field_stats for text log files (#34861) This change ensures the `message` field is always included in the `field_stats` for the semi-structured text log file file structure. Previously it was not, as it will almost certainly contain all distinct values. However, for consistency in the UI it's useful to include it. 
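To make the intent concrete, the following standalone sketch (plain Java; the class name and sample data are hypothetical, and this is not the real FileStructureUtils.calculateFieldStats API) computes the kind of per-field statistics that field_stats carries: count, cardinality, and top hits. For a raw message field the cardinality is typically equal to the count, since almost every log line is distinct, which is why the field was previously skipped:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class FieldStatsSketch {
    public static void main(String[] args) {
        // Hypothetical sample messages; in the real finder these come from the analyzed log file.
        List<String> sampleMessages = Arrays.asList(
            "[2018-10-26T07:45:02,123][INFO ][o.e.n.Node] starting ...",
            "[2018-10-26T07:45:03,456][INFO ][o.e.n.Node] started",
            "[2018-10-26T07:45:03,456][INFO ][o.e.n.Node] started");
        // Count how often each distinct value occurs.
        Map<String, Long> frequencies = sampleMessages.stream()
            .collect(Collectors.groupingBy(m -> m, Collectors.counting()));
        long count = sampleMessages.size();
        long cardinality = frequencies.size();
        // Top hits: the most frequent values, capped at 10.
        List<Map.Entry<String, Long>> topHits = frequencies.entrySet().stream()
            .sorted(Map.Entry.<String, Long>comparingByValue().reversed())
            .limit(10)
            .collect(Collectors.toList());
        System.out.println("count=" + count + ", cardinality=" + cardinality + ", topHits=" + topHits);
    }
}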
--- .../TextLogFileStructureFinder.java | 1 + .../TextLogFileStructureFinderTests.java | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index 7578ca8f7fbfb..591a326128271 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -89,6 +89,7 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex mappings.put(FileStructureUtils.DEFAULT_TIMESTAMP_FIELD, Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "date")); SortedMap fieldStats = new TreeMap<>(); + fieldStats.put("message", FileStructureUtils.calculateFieldStats(sampleMessages, timeoutChecker)); GrokPatternCreator grokPatternCreator = new GrokPatternCreator(explanation, sampleMessages, mappings, fieldStats, timeoutChecker); // We can't parse directly into @timestamp using Grok, so parse to some other time field, which the date filter will then remove diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index a848f384e2e5f..de4244cd620a5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -7,11 +7,16 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.ml.filestructurefinder.FieldStats; import org.elasticsearch.xpack.core.ml.filestructurefinder.FileStructure; import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Collections; import java.util.Set; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; public class TextLogFileStructureFinderTests extends FileStructureTestCase { @@ -127,6 +132,11 @@ public void testCreateConfigsGivenElasticsearchLog() throws Exception { assertEquals("\\[%{TIMESTAMP_ISO8601:timestamp}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); assertEquals("timestamp", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getJodaTimestampFormats()); + FieldStats messageFieldStats = structure.getFieldStats().get("message"); + assertNotNull(messageFieldStats); + for (String statMessage : messageFieldStats.getTopHits().stream().map(m -> (String) m.get("value")).collect(Collectors.toList())) { + assertThat(structureFinder.getSampleMessages(), hasItem(statMessage)); + } } public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() throws Exception { @@ -158,6 +168,11 @@ public void testCreateConfigsGivenElasticsearchLogAndTimestampFieldOverride() th assertEquals("\\[%{TIMESTAMP_ISO8601:my_time}\\]\\[%{LOGLEVEL:loglevel} \\]\\[.*", structure.getGrokPattern()); assertEquals("my_time", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getJodaTimestampFormats()); + 
FieldStats messageFieldStats = structure.getFieldStats().get("message"); + assertNotNull(messageFieldStats); + for (String statMessage : messageFieldStats.getTopHits().stream().map(m -> (String) m.get("value")).collect(Collectors.toList())) { + assertThat(structureFinder.getSampleMessages(), hasItem(statMessage)); + } } public void testCreateConfigsGivenElasticsearchLogAndGrokPatternOverride() throws Exception { @@ -191,6 +206,13 @@ public void testCreateConfigsGivenElasticsearchLogAndGrokPatternOverride() throw "\\[%{JAVACLASS:class} *\\] \\[%{HOSTNAME:node}\\] %{JAVALOGMESSAGE:message}", structure.getGrokPattern()); assertEquals("timestamp", structure.getTimestampField()); assertEquals(Collections.singletonList("ISO8601"), structure.getJodaTimestampFormats()); + FieldStats messageFieldStats = structure.getFieldStats().get("message"); + assertNotNull(messageFieldStats); + for (String statMessage : messageFieldStats.getTopHits().stream().map(m -> (String) m.get("value")).collect(Collectors.toList())) { + // In this case the "message" field was output by the Grok pattern, so "message" + // at the end of the processing will _not_ contain a complete sample message + assertThat(structureFinder.getSampleMessages(), not(hasItem(statMessage))); + } } public void testCreateConfigsGivenElasticsearchLogAndImpossibleGrokPatternOverride() { From 33345d96efbec6c4bcc57c04e5637b89c7a0e23f Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 26 Oct 2018 07:52:31 +0100 Subject: [PATCH 04/14] Delete flaky SettingsBasedHostProviderIT test (#34813) testClusterFormsByScanningPorts is flaky: sometimes in CI it's not possible to bind to any of the ports we need to in order for the port scanning to work. This change removes this test, and #34809 describes a better way to test this behaviour. 
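The underlying problem is generic: any test that needs particular ports to be free can lose a race against other processes on the CI host. As a rough illustration (ordinary Java, not Elasticsearch test code), binding to port 0 sidesteps the problem entirely by letting the kernel pick a currently free port, which is also the idea the next patch applies to the Minio fixture:

import java.io.IOException;
import java.net.InetAddress;
import java.net.ServerSocket;

public class FreePortExample {
    public static void main(String[] args) throws IOException {
        // Port 0 asks the OS for any free ephemeral port, so no fixed range needs to be probed.
        try (ServerSocket socket = new ServerSocket(0, 1, InetAddress.getLoopbackAddress())) {
            System.out.println("OS-assigned free port: " + socket.getLocalPort());
        }
    }
}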
--- .../zen/SettingsBasedHostProviderIT.java | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java index 52f3a05ce0866..1e64ced1a2595 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/SettingsBasedHostProviderIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.common.settings.Settings; @@ -27,10 +26,7 @@ import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; -import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.LIMIT_LOCAL_PORTS_COUNT; -import static org.elasticsearch.transport.TcpTransport.PORT; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/34781") @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SettingsBasedHostProviderIT extends ESIntegTestCase { @@ -64,20 +60,4 @@ public void testClusterFormsWithSingleSeedHostInSettings() { ensureStableCluster(extraNodes + 1); } - - public void testClusterFormsByScanningPorts() { - // This test will fail if all 4 ports just less than the one used by the first node are already bound by something else. It's hard - // to know how often this might happen in reality, so let's try it and see. 
- - final String seedNodeName = internalCluster().startNode(); - final NodesInfoResponse nodesInfoResponse - = client(seedNodeName).admin().cluster().nodesInfo(new NodesInfoRequest("_local")).actionGet(); - final int seedNodePort = nodesInfoResponse.getNodes().get(0).getTransport().getAddress().publishAddress().getPort(); - final int minPort = randomIntBetween(seedNodePort - LIMIT_LOCAL_PORTS_COUNT + 1, seedNodePort - 1); - final String portSpec = minPort + "-" + seedNodePort; - - logger.info("--> using port specification [{}]", portSpec); - internalCluster().startNode(Settings.builder().put(PORT.getKey(), portSpec)); - ensureStableCluster(2); - } } From 65edec0d42cba034eca307976fd39b98ed1b1fdb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 26 Oct 2018 12:55:21 +0200 Subject: [PATCH 05/14] TEST: Stabilize Minio Free Port Search (#34894) * Binding to `0` gives us free ports that are assigned sequentially by Linux making collisions much less likely compared to manually finding a free port in a range * Closes #32208 --- plugins/repository-s3/build.gradle | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 888a9842833a1..5c57c9208c536 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -202,14 +202,11 @@ if (useFixture && minioDistribution) { doLast { // get free port - for (int port = 60920; port < 60940; port++) { - try { - javax.net.ServerSocketFactory.getDefault().createServerSocket(port, 1, InetAddress.getByName(minioAddress)).close() - minioPort = port - break - } catch (BindException e) { - logger.info("Port " + port + " for Minio process is already taken", e) - } + ServerSocket serverSocket = new ServerSocket(0, 1, InetAddress.getByName(minioAddress)) + try { + minioPort = serverSocket.localPort + } finally { + serverSocket.close() } if (minioPort == 0) { throw new GradleException("Could not find a free port for Minio") From db12005674413c752e09936eca95355e6cf75890 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 26 Oct 2018 12:56:19 +0200 Subject: [PATCH 06/14] Fix LineLength Check Suppressions: index.fielddata (#34891) * Fix linelength suppressions in index.fielddata * Some lines that were too long were dead code => Removed them and all code that became dead because of it * Relates #34884 --- .../resources/checkstyle_suppressions.xml | 19 ---- .../index/fielddata/IndexFieldData.java | 17 ---- .../index/fielddata/IndexFieldDataCache.java | 9 +- .../fielddata/IndexFieldDataService.java | 19 ++-- .../DoubleValuesComparatorSource.java | 3 +- .../FloatValuesComparatorSource.java | 3 +- .../LongValuesComparatorSource.java | 3 +- .../ordinals/GlobalOrdinalsBuilder.java | 3 +- .../fielddata/ordinals/MultiOrdinals.java | 3 +- .../fielddata/ordinals/OrdinalsBuilder.java | 94 +++---------------- .../ordinals/SinglePackedOrdinals.java | 3 +- .../plain/AbstractIndexOrdinalsFieldData.java | 3 +- .../AbstractLatLonPointDVIndexFieldData.java | 3 +- .../plain/BinaryDVIndexFieldData.java | 3 +- .../plain/NonEstimatingEstimator.java | 56 ----------- .../plain/PagedBytesIndexFieldData.java | 17 +--- .../SortedSetDVBytesAtomicFieldData.java | 2 +- .../cache/IndicesFieldDataCache.java | 11 ++- .../cache/IndicesFieldDataCacheListener.java | 55 ----------- .../fielddata/AbstractFieldDataTestCase.java | 4 +- .../AbstractStringFieldDataTestCase.java | 12 ++- .../index/fielddata/FieldDataCacheTests.java | 9 +- .../fielddata/IndexFieldDataServiceTests.java
| 3 +- 23 files changed, 78 insertions(+), 276 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java delete mode 100644 server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index df90fe70497f5..592c1512d60cf 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -261,19 +261,6 @@ - - - - - - - - - - - - - @@ -318,8 +305,6 @@ - - @@ -513,10 +498,6 @@ - - - - diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java index 642270113cf76..bb3388bc943c9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldData.java @@ -54,23 +54,6 @@ */ public interface IndexFieldData extends IndexComponent { - class CommonSettings { - public static final String SETTING_MEMORY_STORAGE_HINT = "memory_storage_hint"; - - public enum MemoryStorageFormat { - ORDINALS, PACKED, PAGED; - - public static MemoryStorageFormat fromString(String string) { - for (MemoryStorageFormat e : MemoryStorageFormat.values()) { - if (e.name().equalsIgnoreCase(string)) { - return e; - } - } - return null; - } - } - } - /** * The field name. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java index 5238f06a7909c..83108ad571190 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataCache.java @@ -31,7 +31,8 @@ public interface IndexFieldDataCache { > FD load(LeafReaderContext context, IFD indexFieldData) throws Exception; - > IFD load(DirectoryReader indexReader, IFD indexFieldData) throws Exception; + > IFD load(DirectoryReader indexReader, IFD indexFieldData) + throws Exception; /** * Clears all the field data stored cached in on this index. 
@@ -59,13 +60,15 @@ default void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lo class None implements IndexFieldDataCache { @Override - public > FD load(LeafReaderContext context, IFD indexFieldData) throws Exception { + public > FD load(LeafReaderContext context, IFD indexFieldData) + throws Exception { return indexFieldData.loadDirect(context); } @Override @SuppressWarnings("unchecked") - public > IFD load(DirectoryReader indexReader, IFD indexFieldData) throws Exception { + public > IFD load(DirectoryReader indexReader, + IFD indexFieldData) throws Exception { return (IFD) indexFieldData.localGlobalDirect(indexReader); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 25b03bcb2506f..db5a7b437d35f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -42,15 +42,16 @@ public class IndexFieldDataService extends AbstractIndexComponent implements Closeable { public static final String FIELDDATA_CACHE_VALUE_NODE = "node"; public static final String FIELDDATA_CACHE_KEY = "index.fielddata.cache"; - public static final Setting INDEX_FIELDDATA_CACHE_KEY = new Setting<>(FIELDDATA_CACHE_KEY, (s) -> FIELDDATA_CACHE_VALUE_NODE, (s) -> { - switch (s) { - case "node": - case "none": - return s; - default: - throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); - } - }, Property.IndexScope); + public static final Setting INDEX_FIELDDATA_CACHE_KEY = + new Setting<>(FIELDDATA_CACHE_KEY, (s) -> FIELDDATA_CACHE_VALUE_NODE, (s) -> { + switch (s) { + case "node": + case "none": + return s; + default: + throw new IllegalArgumentException("failed to parse [" + s + "] must be one of [node,none]"); + } + }, Property.IndexScope); private final CircuitBreakerService circuitBreakerService; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 0a273d88380eb..c414944801f79 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -43,7 +43,8 @@ public class DoubleValuesComparatorSource extends IndexFieldData.XFieldComparato private final IndexNumericFieldData indexFieldData; - public DoubleValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public DoubleValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, + Nested nested) { super(missingValue, sortMode, nested); this.indexFieldData = indexFieldData; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index beb27644a1b95..4621c7fd287d5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -41,7 +41,8 @@ public 
class FloatValuesComparatorSource extends IndexFieldData.XFieldComparator private final IndexNumericFieldData indexFieldData; - public FloatValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public FloatValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, + Nested nested) { super(missingValue, sortMode, nested); this.indexFieldData = indexFieldData; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index f323709e8f5ee..48b0a1b155f92 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -40,7 +40,8 @@ public class LongValuesComparatorSource extends IndexFieldData.XFieldComparatorS private final IndexNumericFieldData indexFieldData; - public LongValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { + public LongValuesComparatorSource(IndexNumericFieldData indexFieldData, @Nullable Object missingValue, MultiValueMode sortMode, + Nested nested) { super(missingValue, sortMode, nested); this.indexFieldData = indexFieldData; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java index 646c7adb40408..3c8f100a17db5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/GlobalOrdinalsBuilder.java @@ -79,7 +79,8 @@ public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexO ); } - public static IndexOrdinalsFieldData buildEmpty(IndexSettings indexSettings, final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData) throws IOException { + public static IndexOrdinalsFieldData buildEmpty(IndexSettings indexSettings, final IndexReader indexReader, + IndexOrdinalsFieldData indexFieldData) throws IOException { assert indexReader.leaves().size() > 1; final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()]; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java index 86e2787658c35..47307f27c4014 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/MultiOrdinals.java @@ -46,7 +46,8 @@ public class MultiOrdinals extends Ordinals { /** * Return true if this impl is going to be smaller than {@link SinglePackedOrdinals} by at least 20%. 
*/ - public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, int numDocsWithValue, long numOrds, float acceptableOverheadRatio) { + public static boolean significantlySmallerThanSinglePackedOrdinals(int maxDoc, int numDocsWithValue, long numOrds, + float acceptableOverheadRatio) { int bitsPerOrd = PackedInts.bitsRequired(numOrds); bitsPerOrd = PackedInts.fastestFormatAndBits(numDocsWithValue, bitsPerOrd, acceptableOverheadRatio).bitsPerValue; // Compute the worst-case number of bits per value for offsets in the worst case, eg. if no docs have a value at the diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index a2baf1fee6c0b..4b1292591cdd9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -19,14 +19,7 @@ package org.elasticsearch.index.fielddata.ordinals; -import org.apache.lucene.index.PostingsEnum; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.BitSet; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefIterator; -import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LongsRef; import org.apache.lucene.util.packed.GrowableWriter; import org.apache.lucene.util.packed.PackedInts; @@ -43,11 +36,6 @@ */ public final class OrdinalsBuilder implements Closeable { - /** - * Whether to for the use of {@link MultiOrdinals} to store the ordinals for testing purposes. - */ - public static final String FORCE_MULTI_ORDINALS = "force_multi_ordinals"; - /** * Default acceptable overhead ratio. {@link OrdinalsBuilder} memory usage is mostly transient so it is likely a better trade-off to * trade memory for speed in order to resize less often. @@ -159,7 +147,8 @@ private static int numOrdinals(int level, long offset) { this.acceptableOverheadRatio = acceptableOverheadRatio; positions = new PagedGrowableWriter(maxDoc, PAGE_SIZE, startBitsPerValue, acceptableOverheadRatio); firstOrdinals = new GrowableWriter(startBitsPerValue, maxDoc, acceptableOverheadRatio); - // over allocate in order to never worry about the array sizes, 24 entries would allow to store several millions of ordinals per doc... + // over allocate in order to never worry about the array sizes, 24 entries would allow + // to store several millions of ordinals per doc... 
ordinals = new PagedGrowableWriter[24]; nextLevelSlices = new PagedGrowableWriter[24]; sizes = new int[24]; @@ -200,7 +189,8 @@ private int firstLevel(int docID, long ordinal) { } else { final long newSlice = newSlice(1); if (firstNextLevelSlices == null) { - firstNextLevelSlices = new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio); + firstNextLevelSlices = + new PagedGrowableWriter(firstOrdinals.size(), PAGE_SIZE, 3, acceptableOverheadRatio); } firstNextLevelSlices.set(docID, newSlice); final long offset = startOffset(1, newSlice); @@ -282,14 +272,14 @@ public void appendOrdinals(int docID, LongsRef ords) { private OrdinalsStore ordinals; private final LongsRef spare; - public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) throws IOException { + public OrdinalsBuilder(int maxDoc, float acceptableOverheadRatio) { this.maxDoc = maxDoc; int startBitsPerValue = 8; ordinals = new OrdinalsStore(maxDoc, startBitsPerValue, acceptableOverheadRatio); spare = new LongsRef(); } - public OrdinalsBuilder(int maxDoc) throws IOException { + public OrdinalsBuilder(int maxDoc) { this(maxDoc, DEFAULT_ACCEPTABLE_OVERHEAD_RATIO); } @@ -303,7 +293,7 @@ public LongsRef docOrds(int docID) { } /** - * Return a {@link org.apache.lucene.util.packed.PackedInts.Reader} instance mapping every doc ID to its first ordinal + 1 if it exists and 0 otherwise. + * Return a {@link PackedInts.Reader} instance mapping every doc ID to its first ordinal + 1 if it exists and 0 otherwise. */ public PackedInts.Reader getFirstOrdinals() { return ordinals.firstOrdinals; @@ -339,27 +329,6 @@ public OrdinalsBuilder addDoc(int doc) { return this; } - /** - * Returns true iff this builder contains a document ID that is associated with more than one ordinal. Otherwise false; - */ - public boolean isMultiValued() { - return numMultiValuedDocs > 0; - } - - /** - * Returns the number distinct of document IDs with one or more values. - */ - public int getNumDocsWithValue() { - return numDocsWithValue; - } - - /** - * Returns the number distinct of document IDs associated with exactly one value. - */ - public int getNumSingleValuedDocs() { - return numDocsWithValue - numMultiValuedDocs; - } - /** * Returns the number distinct of document IDs associated with two or more values. */ @@ -381,29 +350,15 @@ public long getValueCount() { return currentOrd + 1; } - /** - * Builds a {@link BitSet} where each documents bit is that that has one or more ordinals associated with it. - * if every document has an ordinal associated with it this method returns null - */ - public BitSet buildDocsWithValuesSet() { - if (numDocsWithValue == maxDoc) { - return null; - } - final FixedBitSet bitSet = new FixedBitSet(maxDoc); - for (int docID = 0; docID < maxDoc; ++docID) { - if (ordinals.firstOrdinals.get(docID) != 0) { - bitSet.set(docID); - } - } - return bitSet; - } - /** * Builds an {@link Ordinals} instance from the builders current state. 
*/ public Ordinals build() { final float acceptableOverheadRatio = PackedInts.DEFAULT; - if (numMultiValuedDocs > 0 || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals(maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio)) { + if (numMultiValuedDocs > 0 + || MultiOrdinals.significantlySmallerThanSinglePackedOrdinals( + maxDoc, numDocsWithValue, getValueCount(), acceptableOverheadRatio) + ) { // MultiOrdinals can be smaller than SinglePackedOrdinals for sparse fields return new MultiOrdinals(this, acceptableOverheadRatio); } else { @@ -419,33 +374,6 @@ public int maxDoc() { return maxDoc; } - /** - * This method iterates all terms in the given {@link TermsEnum} and - * associates each terms ordinal with the terms documents. The caller must - * exhaust the returned {@link BytesRefIterator} which returns all values - * where the first returned value is associated with the ordinal {@code 1} - * etc. - */ - public BytesRefIterator buildFromTerms(final TermsEnum termsEnum) throws IOException { - return new BytesRefIterator() { - private PostingsEnum docsEnum = null; - - @Override - public BytesRef next() throws IOException { - BytesRef ref; - if ((ref = termsEnum.next()) != null) { - docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); - nextOrdinal(); - int docId; - while ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { - addDoc(docId); - } - } - return ref; - } - }; - } - /** * Closes this builder and release all resources. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java index 27f0aadee8719..9bbdaeb3f7b54 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ordinals/SinglePackedOrdinals.java @@ -42,7 +42,8 @@ public SinglePackedOrdinals(OrdinalsBuilder builder, float acceptableOverheadRat assert builder.getNumMultiValuesDocs() == 0; this.valueCount = (int) builder.getValueCount(); // We don't reuse the builder as-is because it might have been built with a higher overhead ratio - final PackedInts.Mutable reader = PackedInts.getMutable(builder.maxDoc(), PackedInts.bitsRequired(valueCount), acceptableOverheadRatio); + final PackedInts.Mutable reader = + PackedInts.getMutable(builder.maxDoc(), PackedInts.bitsRequired(valueCount), acceptableOverheadRatio); PackedInts.copy(builder.getFirstOrdinals(), 0, reader, 0, builder.maxDoc(), 8 * 1024); this.reader = reader; } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java index d89c6d64d4915..0dc0de838a3e5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexOrdinalsFieldData.java @@ -36,7 +36,8 @@ import java.io.IOException; -public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldData implements IndexOrdinalsFieldData { +public abstract class AbstractIndexOrdinalsFieldData extends AbstractIndexFieldData + implements IndexOrdinalsFieldData { private final double minFrequency, maxFrequency; private final int minSegmentSize; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java 
b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java index 6c92d571196ec..ed77d3d5f8b37 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLatLonPointDVIndexFieldData.java @@ -43,7 +43,8 @@ public abstract class AbstractLatLonPointDVIndexFieldData extends DocValuesIndex } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, + boolean reverse) { throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java index a7e1981766704..13d90ea36e738 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVIndexFieldData.java @@ -46,7 +46,8 @@ public BinaryDVAtomicFieldData loadDirect(LeafReaderContext context) throws Exce } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, + boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); /** * Check if we can use a simple {@link SortedSetSortField} compatible with index sorting and diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java deleted file mode 100644 index 481b2b3c84ca0..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/NonEstimatingEstimator.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.fielddata.plain; - -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.breaker.CircuitBreaker; - -import java.io.IOException; - -/** - * Estimator that does nothing except for adjust the breaker after the field - * data has been loaded. 
Useful for field data implementations that do not yet - * have pre-loading estimations. - */ -public class NonEstimatingEstimator implements AbstractIndexFieldData.PerValueEstimator { - - private final CircuitBreaker breaker; - - NonEstimatingEstimator(CircuitBreaker breaker) { - this.breaker = breaker; - } - - @Override - public long bytesPerValue(BytesRef term) { - return 0; - } - - @Override - public TermsEnum beforeLoad(Terms terms) throws IOException { - return null; - } - - @Override - public void afterLoad(@Nullable TermsEnum termsEnum, long actualUsed) { - breaker.addWithoutBreaking(actualUsed); - } -} diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index e6100d2f89311..66a44a95afc7f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -65,7 +65,7 @@ public Builder(double minFrequency, double maxFrequency, int minSegmentSize) { @Override public IndexOrdinalsFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, - IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { + IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { return new PagedBytesIndexFieldData(indexSettings, fieldType.name(), cache, breakerService, minFrequency, maxFrequency, minSegmentSize); } @@ -78,7 +78,8 @@ public PagedBytesIndexFieldData(IndexSettings indexSettings, String fieldName, } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, + boolean reverse) { XFieldComparatorSource source = new BytesRefFieldComparatorSource(this, missingValue, sortMode, nested); return new SortField(getFieldName(), source, reverse); } @@ -88,7 +89,8 @@ public AtomicOrdinalsFieldData loadDirect(LeafReaderContext context) throws Exce LeafReader reader = context.reader(); AtomicOrdinalsFieldData data = null; - PagedBytesEstimator estimator = new PagedBytesEstimator(context, breakerService.getBreaker(CircuitBreaker.FIELDDATA), getFieldName()); + PagedBytesEstimator estimator = + new PagedBytesEstimator(context, breakerService.getBreaker(CircuitBreaker.FIELDDATA), getFieldName()); Terms terms = reader.terms(getFieldName()); if (terms == null) { data = AbstractAtomicOrdinalsFieldData.empty(); @@ -250,14 +252,5 @@ public void afterLoad(TermsEnum termsEnum, long actualUsed) { breaker.addWithoutBreaking(-(estimatedBytes - actualUsed)); } - /** - * Adjust the breaker when no terms were actually loaded, but the field - * data takes up space regardless. For instance, when ordinals are - * used. 
- * @param actualUsed bytes actually used - */ - public void adjustForNoTerms(long actualUsed) { - breaker.addWithoutBreaking(actualUsed); - } } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java index d336c1d5cd7ce..5e8a30ab6e4ea 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/SortedSetDVBytesAtomicFieldData.java @@ -32,7 +32,7 @@ import java.util.function.Function; /** - * An {@link AtomicFieldData} implementation that uses Lucene {@link org.apache.lucene.index.SortedSetDocValues}. + * An {@link AtomicFieldData} implementation that uses Lucene {@link SortedSetDocValues}. */ public final class SortedSetDVBytesAtomicFieldData extends AbstractAtomicOrdinalsFieldData { diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index e7573ae9f71a0..4a784af6bb3f3 100644 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -90,7 +90,10 @@ public void onRemoval(RemovalNotification notification) { final Accountable value = notification.getValue(); for (IndexFieldDataCache.Listener listener : key.listeners) { try { - listener.onRemoval(key.shardId, indexCache.fieldName, notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED, value.ramBytesUsed()); + listener.onRemoval( + key.shardId, indexCache.fieldName, + notification.getRemovalReason() == RemovalNotification.RemovalReason.EVICTED, value.ramBytesUsed() + ); } catch (Exception e) { // load anyway since listeners should not throw exceptions logger.error("Failed to call listener on field data cache unloading", e); @@ -125,7 +128,8 @@ static class IndexFieldCache implements IndexFieldDataCache, IndexReader.ClosedL } @Override - public > FD load(final LeafReaderContext context, final IFD indexFieldData) throws Exception { + public > FD load(final LeafReaderContext context, + final IFD indexFieldData) throws Exception { final ShardId shardId = ShardUtils.extractShardId(context.reader()); final IndexReader.CacheHelper cacheHelper = context.reader().getCoreCacheHelper(); if (cacheHelper == null) { @@ -151,7 +155,8 @@ public > FD load(fina } @Override - public > IFD load(final DirectoryReader indexReader, final IFD indexFieldData) throws Exception { + public > IFD load(final DirectoryReader indexReader, + final IFD indexFieldData) throws Exception { final ShardId shardId = ShardUtils.extractShardId(indexReader); final IndexReader.CacheHelper cacheHelper = indexReader.getReaderCacheHelper(); if (cacheHelper == null) { diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java deleted file mode 100644 index 1995bb2dfb805..0000000000000 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCacheListener.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.indices.fielddata.cache; - -import org.apache.lucene.util.Accountable; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.fielddata.IndexFieldDataCache; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.breaker.CircuitBreakerService; - -/** - * A {@link org.elasticsearch.index.fielddata.IndexFieldDataCache.Listener} implementation that updates indices (node) level statistics / service about - * field data entries being loaded and unloaded. - * - * Currently it only decrements the memory used in the {@link CircuitBreakerService}. - */ -public class IndicesFieldDataCacheListener implements IndexFieldDataCache.Listener { - - private final CircuitBreakerService circuitBreakerService; - - @Inject - public IndicesFieldDataCacheListener(CircuitBreakerService circuitBreakerService) { - this.circuitBreakerService = circuitBreakerService; - } - - @Override - public void onCache(ShardId shardId, String fieldName, Accountable fieldData) { - } - - @Override - public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { - assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or equal to 0 and not [" + sizeInBytes + "]"; - circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); - } - -} - diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 7decbe9024fdf..6d9ec95af16cd 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -135,7 +135,9 @@ public void setup() throws Exception { mapperService = indexService.mapperService(); indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache(); // LogByteSizeMP to preserve doc ID order - writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy())); + writer = new IndexWriter( + new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()) + ); shardContext = indexService.newQueryShardContext(0, null, () -> 0, null); } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java index ef2a9b3873580..21d84203f6df3 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java +++ 
b/server/src/test/java/org/elasticsearch/index/fielddata/AbstractStringFieldDataTestCase.java @@ -264,7 +264,8 @@ public void testActualMissingValue(boolean reverse) throws IOException { final String missingValue = values[1]; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(missingValue, MultiValueMode.MIN, null, reverse); - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); + TopFieldDocs topDocs = + searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); assertEquals(numDocs, topDocs.totalHits.value); BytesRef previousValue = reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { @@ -318,7 +319,8 @@ public void testSortMissing(boolean first, boolean reverse) throws IOException { final IndexFieldData indexFieldData = getForField("value"); IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer)); SortField sortField = indexFieldData.sortField(first ? "_first" : "_last", MultiValueMode.MIN, null, reverse); - TopFieldDocs topDocs = searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); + TopFieldDocs topDocs = + searcher.search(new MatchAllDocsQuery(), randomBoolean() ? numDocs : randomIntBetween(10, numDocs), new Sort(sortField)); assertEquals(numDocs, topDocs.totalHits.value); BytesRef previousValue = first ? null : reverse ? UnicodeUtil.BIG_TERM : new BytesRef(); for (int i = 0; i < topDocs.scoreDocs.length; ++i) { @@ -406,8 +408,10 @@ public void testNestedSorting(MultiValueMode sortMode) throws IOException { Query parentFilter = new TermQuery(new Term("type", "parent")); Query childFilter = Queries.not(parentFilter); Nested nested = createNested(searcher, parentFilter, childFilter); - BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode, nested); - ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + BytesRefFieldComparatorSource nestedComparatorSource = + new BytesRefFieldComparatorSource(fieldData, missingValue, sortMode, nested); + ToParentBlockJoinQuery query = + new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("text", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, randomIntBetween(1, numParents), sort); assertTrue(topDocs.scoreDocs.length > 0); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java index 375b10e262766..2590dc2660333 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/FieldDataCacheTests.java @@ -65,7 +65,8 @@ public void testLoadGlobal_neverCacheIfFieldIsMissing() throws Exception { } } iw.close(); - DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0)); + DirectoryReader ir = + ElasticsearchDirectoryReader.wrap(DirectoryReader.open(dir), new ShardId("_index", "_na_", 0)); DummyAccountingFieldDataCache fieldDataCache = new 
DummyAccountingFieldDataCache(); // Testing SortedSetDVOrdinalsIndexFieldData: @@ -114,12 +115,14 @@ private class DummyAccountingFieldDataCache implements IndexFieldDataCache { private int cachedGlobally = 0; @Override - public > FD load(LeafReaderContext context, IFD indexFieldData) throws Exception { + public > FD load(LeafReaderContext context, IFD indexFieldData) + throws Exception { return indexFieldData.loadDirect(context); } @Override - public > IFD load(DirectoryReader indexReader, IFD indexFieldData) throws Exception { + public > IFD load(DirectoryReader indexReader, + IFD indexFieldData) throws Exception { cachedGlobally++; return (IFD) indexFieldData.localGlobalDirect(indexReader); } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java index 2eba60a1a5f83..7b17b727be2b1 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/IndexFieldDataServiceTests.java @@ -252,7 +252,8 @@ private void doTestRequireDocValues(MappedFieldType ft) { ThreadPool threadPool = new TestThreadPool("random_threadpool_name"); try { IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, null); + IndexFieldDataService ifds = + new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, null); ft.setName("some_long"); ft.setHasDocValues(true); ifds.getForField(ft); // no exception From c42f350a81e42218bdfecd294aa0ab689b4e9029 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Fri, 26 Oct 2018 12:57:31 +0200 Subject: [PATCH 07/14] [Test] Fix FullClusterRestartIT.testShrink() with copy_settings param (#34853) The pull request #34338 added strict deprecation mode to the REST tests and adds the copy_settings param when testing the shrink of an index. This parameter has been added in 6.4.0 and will be removed in 8.0, so the test now needs to take care of the old cluster version when adding the copy_settings param. 
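The shape of the fix is a common backwards-compatibility testing pattern: only attach a request parameter when the cluster under test is new enough to understand it. A simplified, self-contained sketch follows; the Version class and parameter map below are stand-ins for the real Elasticsearch test utilities, not the actual APIs:

import java.util.HashMap;
import java.util.Map;

public class VersionGateSketch {

    // Simplified stand-in for org.elasticsearch.Version, kept to major.minor only.
    static final class Version implements Comparable<Version> {
        final int major;
        final int minor;
        Version(int major, int minor) { this.major = major; this.minor = minor; }
        boolean onOrAfter(Version other) { return compareTo(other) >= 0; }
        @Override public int compareTo(Version o) {
            return major != o.major ? Integer.compare(major, o.major) : Integer.compare(minor, o.minor);
        }
    }

    public static void main(String[] args) {
        Version oldClusterVersion = new Version(6, 3); // hypothetical old cluster under test
        Version v640 = new Version(6, 4);              // version that introduced the parameter
        Map<String, String> params = new HashMap<>();
        if (oldClusterVersion.onOrAfter(v640)) {
            params.put("copy_settings", "true");       // only valid from 6.4.0 onwards
        }
        System.out.println("request params: " + params); // prints {} for the 6.3 cluster
    }
}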
--- .../org/elasticsearch/upgrades/FullClusterRestartIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 6f878d24c871a..d26dc2029f6cf 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -325,7 +325,6 @@ public void testClusterState() throws Exception { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/34853") public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; @@ -364,7 +363,9 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - shrinkIndexRequest.addParameter("copy_settings", "true"); + if (getOldClusterVersion().onOrAfter(Version.V_6_4_0)) { + shrinkIndexRequest.addParameter("copy_settings", "true"); + } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); From 3f1fec1813ef095d5cdc8e3c6bb2aa3b69feb090 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Fri, 26 Oct 2018 15:19:35 +0300 Subject: [PATCH 08/14] [Docs] audit logfile structured format (#34584) Documents the new structured logfile format for auditing that was introduced by #31931. Most changes herein are for 6.x . In 7.0 the deprecated format is gone and a follow-up PR is in order. --- .../settings/audit-settings.asciidoc | 48 +++++++++++++------ .../security/auditing/output-logfile.asciidoc | 40 ++++++++++++++-- .../en/security/auditing/overview.asciidoc | 11 +++-- 3 files changed, 77 insertions(+), 22 deletions(-) diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index 524198df58c47..ec661a1f30c9f 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -18,10 +18,18 @@ Set to `true` to enable auditing on the node. The default value is `false`. `xpack.security.audit.outputs`:: Specifies where audit logs are output. For example: `[ index, logfile ]`. The default value is `logfile`, which puts the auditing events in a dedicated -`_access.log` file on the node. You can also specify `index`, which -puts the auditing events in an {es} index that is prefixed with -`.security_audit_log`. The index can reside on the same cluster or a separate -cluster. +file named `_audit.log` on each node. +You can also specify `index`, which puts the auditing events in an {es} index +that is prefixed with `.security_audit_log`. The index can reside on the same +cluster or a separate cluster. + +For backwards compatibility reasons, if you use the logfile output type, a +`_access.log` file is also created. It contains the same +information, but it uses the older (pre-6.5.0) formatting style. +If the backwards compatible format is not required, it should be disabled. +To do that, change its logger level to `off` in the `log4j2.properties` file. +For more information, see <>. + + -- TIP: If the index is unavailable, it is possible for auditing events to @@ -57,17 +65,27 @@ audited in plain text when including the request body in audit events. 
[[node-audit-settings]] ==== Local Node Info Settings -`xpack.security.audit.logfile.prefix.emit_node_name`:: -Specifies whether to include the node's name in the local node info. The -default value is `true`. - -`xpack.security.audit.logfile.prefix.emit_node_host_address`:: -Specifies whether to include the node's IP address in the local node info. The -default value is `false`. - -`xpack.security.audit.logfile.prefix.emit_node_host_name`:: -Specifies whether to include the node's host name in the local node info. The -default value is `false`. +`xpack.security.audit.logfile.emit_node_name`:: +Specifies whether to include the <> as a field in +each audit event. +The default value is `true`. + +`xpack.security.audit.logfile.emit_node_host_address`:: +Specifies whether to include the node's IP address as a field in each audit event. +The default value is `false`. + +`xpack.security.audit.logfile.emit_node_host_name`:: +Specifies whether to include the node's host name as a field in each audit event. +The default value is `false`. + +`xpack.security.audit.logfile.emit_node_id`:: +Specifies whether to include the node id as a field in each audit event. +This is available for the new format only. That is to say, this information +does not exist in the `_access.log` file. +Unlike <>, whose value might change if the administrator +changes the setting in the config file, the node id will persist across cluster +restarts and the administrator cannot change it. +The default value is `true`. [[index-audit-settings]] ==== Audit Log Indexing Configuration Settings diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc index ee33f618f9665..d9e7eb81c3f3a 100644 --- a/x-pack/docs/en/security/auditing/output-logfile.asciidoc +++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc @@ -3,13 +3,41 @@ === Logfile audit output The `logfile` audit output is the default output for auditing. It writes data to -the `_access.log` file in the logs directory. +the `_audit.log` file in the logs directory. To maintain +compatibility with releases prior to 6.5.0, a `_access.log` file +is also generated. They differ in the output format but the contents +are similar. For systems that are not ingesting the audit file for search or +analytics, it is strongly recommended to keep only the newer format. +Turning off the deprecated output format can be achieved by disabling the logger +in the `log4j2.properties` file (hint: there is a config comment +about it). +For more information, see {ref}/logging.html#configuring-logging-levels[configuring-logging]. + [float] [[audit-log-entry-format]] === Log entry format The format of a log entry is: +The log entries in the `_audit.log` file +have the following format: + +- Each log entry is a one-line JSON document, printed on its own line. +- The fields of a log entry are ordered. However, if a field does not have a value, it + will not be printed. The precise line pattern, together with the complete field + order, are specified in the `log4j2.properties` config file. +- The log entry does not contain nested inner JSON objects, i.e. the doc is flat. +- The field names follow a dotted notation to flatten inner objects. +- A field's value can be a string, a number or an array of strings. +- A field's value, including any request body, is escaped as per the JSON RFC 4627. + +There is a list of <> specifying the +set of fields for each log entry type.
+ +[float] +[[deprecated-audit-log-entry-format]] +=== Deprecated log entry format + +The log entries in the `_access.log` file have the following format: [source,txt] ---------------------------------------------------------------------------- @@ -48,8 +76,14 @@ audited in plain text when including the request body in audit events. [[logging-file]] You can also configure how the logfile is written in the `log4j2.properties` file located in `ES_PATH_CONF`. By default, audit information is appended to the -`_access.log` file located in the standard Elasticsearch `logs` directory +`_audit.log` file located in the standard Elasticsearch `logs` directory (typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. +The deprecated logfile audit format (`_access.log`) can be disabled +from the same `log4j2.properties` file (hint: look for the comment +instructing you to set the log level to `off`). The deprecated format is a duplication +of information that is in place to ensure backwards compatibility. If you do +not depend on the deprecated audit format, it is strongly recommended to use only the +`_audit.log` log appender. [float] [[audit-log-ignore-policy]] diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index b874af3d1c43c..6f04e17d83138 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -16,11 +16,14 @@ must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. {Security} provides two ways to persist audit logs: * The <> output, which persists events to - a dedicated `_access.log` file on the host's file system. -* The <> output, which persists events to an Elasticsearch index. -The audit index can reside on the same cluster, or a separate cluster. + a dedicated `_audit.log` file on the host's file system. + For backwards compatibility reasons, a file named `_access.log` + is also generated. +* The <> output, which persists events to an Elasticsearch + index. The audit index can reside on the same cluster, or a separate cluster. -By default, only the `logfile` output is used when enabling auditing. +By default, only the `logfile` output is used when enabling auditing, +implicitly outputting to both `_audit.log` and `_access.log`. To facilitate browsing and analyzing the events, you can also enable indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: From 306f1d78f8996439d3f966585d9b765c1a7bd300 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Fri, 26 Oct 2018 15:14:24 +0200 Subject: [PATCH 09/14] [CCR] Retry when no index shard stats can be found (#34852) Index shard stats for the follower shard are fetched when a shard follow task is started. This is needed in order to bootstrap the shard follow task with the follower global checkpoint. Sometimes index shard stats are not available (e.g. during a restart), and currently we fail immediately, even though it is very likely that these stats will become available some time later.
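In outline, the fix (see the ShardFollowTasksExecutor change below) distinguishes a shard whose stats are temporarily missing from an index that no longer exists, so the former can be retried:

    // New error handling, taken from the diff below: if the index is still
    // present in the cluster state, report a retryable ShardNotFoundException
    // instead of failing outright with an IndexNotFoundException.
    if (indexStats == null) {
        IndexMetaData indexMetaData = clusterService.state().metaData().index(shardId.getIndex());
        if (indexMetaData != null) {
            errorHandler.accept(new ShardNotFoundException(shardId));
        } else {
            errorHandler.accept(new IndexNotFoundException(shardId.getIndex()));
        }
        return;
    }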
--- .../ccr/action/ShardFollowTasksExecutor.java | 7 +- .../elasticsearch/xpack/CcrIntegTestCase.java | 94 +++++++++++++++++++ .../xpack/ccr/IndexFollowingIT.java | 82 ---------------- .../xpack/ccr/RestartIndexFollowingIT.java | 62 ++++++++++++ 4 files changed, 162 insertions(+), 83 deletions(-) create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 5a82b45cf8c38..88d07566c74bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -205,7 +205,12 @@ private void fetchFollowerShardInfo( client.admin().indices().stats(new IndicesStatsRequest().indices(shardId.getIndexName()), ActionListener.wrap(r -> { IndexStats indexStats = r.getIndex(shardId.getIndexName()); if (indexStats == null) { - errorHandler.accept(new IndexNotFoundException(shardId.getIndex())); + IndexMetaData indexMetaData = clusterService.state().metaData().index(shardId.getIndex()); + if (indexMetaData != null) { + errorHandler.accept(new ShardNotFoundException(shardId)); + } else { + errorHandler.accept(new IndexNotFoundException(shardId.getIndex())); + } return; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index a4f9d69bfa924..c3cbc7436d987 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -8,6 +8,9 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -24,9 +27,11 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -35,6 +40,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; @@ -48,6 +54,9 @@ import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import 
org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; +import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -59,14 +68,17 @@ import java.util.Collection; import java.util.Collections; import java.util.Locale; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Function; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -279,6 +291,88 @@ protected void ensureEmptyWriteBuffers() throws Exception { }); } + protected void pauseFollow(String... indices) throws Exception { + for (String index : indices) { + final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(index); + followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).get(); + } + ensureNoCcrTasks(); + } + + protected void ensureNoCcrTasks() throws Exception { + assertBusy(() -> { + final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); + final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(tasks.tasks(), empty()); + + ListTasksRequest listTasksRequest = new ListTasksRequest(); + listTasksRequest.setDetailed(true); + ListTasksResponse listTasksResponse = followerClient().admin().cluster().listTasks(listTasksRequest).get(); + int numNodeTasks = 0; + for (TaskInfo taskInfo : listTasksResponse.getTasks()) { + if (taskInfo.getAction().startsWith(ListTasksAction.NAME) == false) { + numNodeTasks++; + } + } + assertThat(numNodeTasks, equalTo(0)); + }, 30, TimeUnit.SECONDS); + } + + protected String getIndexSettings(final int numberOfShards, final int numberOfReplicas, + final Map additionalIndexSettings) throws IOException { + final String settings; + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("settings"); + { + builder.field("index.number_of_shards", numberOfShards); + builder.field("index.number_of_replicas", numberOfReplicas); + for (final Map.Entry additionalSetting : additionalIndexSettings.entrySet()) { + builder.field(additionalSetting.getKey(), additionalSetting.getValue()); + } + } + builder.endObject(); + builder.startObject("mappings"); + { + builder.startObject("doc"); + { + builder.startObject("properties"); + { + builder.startObject("f"); + { + builder.field("type", "integer"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + settings = BytesReference.bytes(builder).utf8ToString(); + } + return settings; + } + + public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex) { + PutFollowAction.Request request = new PutFollowAction.Request(); + 
request.setRemoteCluster("leader_cluster"); + request.setLeaderIndex(leaderIndex); + request.setFollowRequest(resumeFollow(followerIndex)); + return request; + } + + public static ResumeFollowAction.Request resumeFollow(String followerIndex) { + ResumeFollowAction.Request request = new ResumeFollowAction.Request(); + request.setFollowerIndex(followerIndex); + request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); + request.setReadPollTimeout(TimeValue.timeValueMillis(10)); + return request; + } + static void removeCCRRelatedMetadataFromClusterState(ClusterService clusterService) throws Exception { CountDownLatch latch = new CountDownLatch(1); clusterService.submitStateUpdateTask("remove-ccr-related-metadata", new ClusterStateUpdateTask() { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 794c64e6bc4ff..d755e495b7d08 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -8,7 +8,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; @@ -756,33 +755,6 @@ private CheckedRunnable assertTask(final int numberOfPrimaryShards, f }; } - private void pauseFollow(String... indices) throws Exception { - for (String index : indices) { - final PauseFollowAction.Request unfollowRequest = new PauseFollowAction.Request(index); - followerClient().execute(PauseFollowAction.INSTANCE, unfollowRequest).get(); - } - ensureNoCcrTasks(); - } - - private void ensureNoCcrTasks() throws Exception { - assertBusy(() -> { - final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); - final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - assertThat(tasks.tasks(), empty()); - - ListTasksRequest listTasksRequest = new ListTasksRequest(); - listTasksRequest.setDetailed(true); - ListTasksResponse listTasksResponse = followerClient().admin().cluster().listTasks(listTasksRequest).get(); - int numNodeTasks = 0; - for (TaskInfo taskInfo : listTasksResponse.getTasks()) { - if (taskInfo.getAction().startsWith(ListTasksAction.NAME) == false) { - numNodeTasks++; - } - } - assertThat(numNodeTasks, equalTo(0)); - }, 30, TimeUnit.SECONDS); - } - private CheckedRunnable assertExpectedDocumentRunnable(final int value) { return () -> { final GetResponse getResponse = followerClient().prepareGet("index2", "doc", Integer.toString(value)).get(); @@ -792,45 +764,6 @@ private CheckedRunnable assertExpectedDocumentRunnable(final int valu }; } - private String getIndexSettings(final int numberOfShards, final int numberOfReplicas, - final Map additionalIndexSettings) throws IOException { - final String settings; - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.startObject("settings"); - { - builder.field("index.number_of_shards", numberOfShards); - builder.field("index.number_of_replicas", numberOfReplicas); - for (final Map.Entry additionalSetting : 
additionalIndexSettings.entrySet()) { - builder.field(additionalSetting.getKey(), additionalSetting.getValue()); - } - } - builder.endObject(); - builder.startObject("mappings"); - { - builder.startObject("doc"); - { - builder.startObject("properties"); - { - builder.startObject("f"); - { - builder.field("type", "integer"); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - } - builder.endObject(); - settings = BytesReference.bytes(builder).utf8ToString(); - } - return settings; - } - private String getIndexSettingsWithNestedMapping(final int numberOfShards, final int numberOfReplicas, final Map additionalIndexSettings) throws IOException { final String settings; @@ -968,19 +901,4 @@ private void assertTotalNumberOfOptimizedIndexing(Index followerIndex, int numbe }); } - public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex) { - PutFollowAction.Request request = new PutFollowAction.Request(); - request.setRemoteCluster("leader_cluster"); - request.setLeaderIndex(leaderIndex); - request.setFollowRequest(resumeFollow(followerIndex)); - return request; - } - - public static ResumeFollowAction.Request resumeFollow(String followerIndex) { - ResumeFollowAction.Request request = new ResumeFollowAction.Request(); - request.setFollowerIndex(followerIndex); - request.setMaxRetryDelay(TimeValue.timeValueMillis(10)); - request.setReadPollTimeout(TimeValue.timeValueMillis(10)); - return request; - } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java new file mode 100644 index 0000000000000..49fbe15ddabae --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/RestartIndexFollowingIT.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; + +import java.util.Locale; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; + +public class RestartIndexFollowingIT extends CcrIntegTestCase { + + @Override + protected int numberOfNodesPerCluster() { + return 1; + } + + public void testFollowIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen("index1"); + + final PutFollowAction.Request followRequest = putFollow("index1", "index2"); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + final long firstBatchNumDocs = randomIntBetween(2, 64); + logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs); + for (int i = 0; i < firstBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + assertBusy(() -> { + assertThat(followerClient().prepareSearch("index2").get().getHits().totalHits, equalTo(firstBatchNumDocs)); + }); + + getFollowerCluster().fullRestart(); + ensureFollowerGreen("index2"); + + final long secondBatchNumDocs = randomIntBetween(2, 64); + for (int i = 0; i < secondBatchNumDocs; i++) { + leaderClient().prepareIndex("index1", "doc").setSource("{}", XContentType.JSON).get(); + } + + assertBusy(() -> { + assertThat(followerClient().prepareSearch("index2").get().getHits().totalHits, + equalTo(firstBatchNumDocs + secondBatchNumDocs)); + }); + } + +} From 31810837812b7b554a021d45a6cc920c7bdc1dcf Mon Sep 17 00:00:00 2001 From: markharwood Date: Fri, 26 Oct 2018 14:21:35 +0100 Subject: [PATCH 10/14] HLRC - add support for source exists API (#34519) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HLRC - add support for source exists API API re-uses the GetRequest object (following the precedent set by the plain “exists” api).
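As a rough usage sketch (the index, type, and id values here are placeholders; `client` is a RestHighLevelClient, on which the new methods below are defined):

    // Placeholder document coordinates; returns true only if the document
    // exists AND has a stored _source field.
    GetRequest getRequest = new GetRequest("index", "type", "id");
    boolean sourceExists = client.existsSource(getRequest, RequestOptions.DEFAULT);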
Relates to #27205 --- .../client/RequestConverters.java | 12 ++++ .../client/RestHighLevelClient.java | 26 +++++++++ .../java/org/elasticsearch/client/CrudIT.java | 55 +++++++++++++++++++ .../client/RestHighLevelClientTests.java | 1 - .../high-level/document/exists.asciidoc | 7 +++ 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 106caea027e27..2ff944b0a5343 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -261,6 +261,18 @@ private static Request getStyleRequest(String method, GetRequest getRequest) { return request; } + + static Request sourceExists(GetRequest getRequest) { + Request request = new Request(HttpHead.METHOD_NAME, endpoint(getRequest.index(), getRequest.type(), getRequest.id(), "_source")); + + Params parameters = new Params(request); + parameters.withPreference(getRequest.preference()); + parameters.withRouting(getRequest.routing()); + parameters.withRefresh(getRequest.refresh()); + parameters.withRealtime(getRequest.realtime()); + // Version params are not currently supported by the source exists API so are not passed + return request; + } static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 342e3efbb6a35..7e8a965361426 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -727,6 +727,32 @@ public final void existsAsync(GetRequest getRequest, RequestOptions options, Act emptySet()); } + /** + * Checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. + * See Source exists API + * on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return true if the document and _source field exists, false otherwise + * @throws IOException in case there is a problem sending the request + */ + public boolean existsSource(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequest(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Asynchronously checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. + * See Source exists API + * on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsync(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, listener, + emptySet()); + } + /** * Index a document using the Index API. 
* See Index API on elastic.co diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e679a85f67f0c..1dd27cff0d92a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -194,6 +194,61 @@ public void testExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } } + + public void testSourceExists() throws IOException { + { + GetRequest getRequest = new GetRequest("index", "type", "id"); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + IndexRequest index = new IndexRequest("index", "type", "id"); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index, RequestOptions.DEFAULT); + { + GetRequest getRequest = new GetRequest("index", "type", "id"); + assertTrue(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + { + GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + { + GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + } + + public void testSourceDoesNotExist() throws IOException { + final String noSourceIndex = "no_source"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + String mapping = "\"_doc\": { \"_source\": {\n" + + " \"enabled\": false\n" + + " } }"; + createIndex(noSourceIndex, settings, mapping); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(noSourceIndex, "_doc", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(noSourceIndex, "_doc", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + GetRequest getRequest = new GetRequest(noSourceIndex, "_doc", "1"); + assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + } public void testGet() throws IOException { { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 8f4ec4cc0ccca..d40c3196e54f4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -650,7 +650,6 @@ public void testApiNamingConventions() throws Exception { "cluster.remote_info", "count", "create", - "exists_source", "get_source", "indices.delete_alias", "indices.delete_template", diff --git a/docs/java-rest/high-level/document/exists.asciidoc b/docs/java-rest/high-level/document/exists.asciidoc index ac6968d1f3752..3a09203bab6c6 100644 --- 
a/docs/java-rest/high-level/document/exists.asciidoc +++ b/docs/java-rest/high-level/document/exists.asciidoc @@ -29,3 +29,10 @@ include-tagged::{doc-tests-file}[{api}-request] <5> Disable fetching stored fields. include::../execution.asciidoc[] + + +==== Source exists request +A variant of the exists request is the `existsSource` method, which performs the additional check +that the document in question has stored the `source`. If the mapping for the index has opted +to remove support for storing JSON source in documents, then this method will return false +for documents in this index. From 5c2c1f44c895129023bda848e718facaa6b220af Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Fri, 26 Oct 2018 08:01:38 -0600 Subject: [PATCH 11/14] [Style] Fix line lengths in action.admin.indices (#34890) Clean up lines over 140 characters in the `org.elasticsearch.action.admin.indices` packages --- .../resources/checkstyle_suppressions.xml | 58 ------------------ .../alias/TransportIndicesAliasesAction.java | 6 +- .../exists/TransportAliasesExistAction.java | 9 ++- .../alias/get/BaseAliasesRequestBuilder.java | 4 +- .../alias/get/TransportGetAliasesAction.java | 9 ++- .../analyze/TransportAnalyzeAction.java | 60 ++++++++++++------- .../ClearIndicesCacheRequestBuilder.java | 3 +- .../close/TransportCloseIndexAction.java | 12 ++-- .../create/CreateIndexRequestBuilder.java | 3 +- .../create/TransportCreateIndexAction.java | 9 ++- .../delete/DeleteIndexRequestBuilder.java | 3 +- .../delete/TransportDeleteIndexAction.java | 9 ++- .../indices/IndicesExistsRequestBuilder.java | 3 +- .../indices/TransportIndicesExistsAction.java | 15 +++-- .../types/TransportTypesExistsAction.java | 12 ++-- .../types/TypesExistsRequestBuilder.java | 3 +- .../indices/flush/TransportFlushAction.java | 6 +- .../flush/TransportShardFlushAction.java | 3 +- .../forcemerge/ForceMergeRequestBuilder.java | 3 +- .../forcemerge/TransportForceMergeAction.java | 7 ++- .../get/GetFieldMappingsRequestBuilder.java | 3 +- .../get/GetMappingsRequestBuilder.java | 3 +- .../TransportGetFieldMappingsIndexAction.java | 3 +- .../put/TransportPutMappingAction.java | 16 +++-- .../open/TransportOpenIndexAction.java | 9 ++- .../recovery/TransportRecoveryAction.java | 4 +- .../refresh/TransportRefreshAction.java | 6 +- .../admin/indices/segments/IndexSegments.java | 5 +- .../IndicesSegmentsRequestBuilder.java | 3 +- .../TransportIndicesSegmentsAction.java | 13 ++-- .../get/GetSettingsRequestBuilder.java | 3 +- .../get/TransportGetSettingsAction.java | 6 +- .../put/TransportUpdateSettingsAction.java | 14 +++-- .../put/UpdateSettingsRequestBuilder.java | 3 +- .../IndicesShardStoreRequestBuilder.java | 5 +- .../shards/IndicesShardStoresResponse.java | 3 +- .../TransportIndicesShardStoresAction.java | 53 ++++++++++------ .../admin/indices/stats/IndexStats.java | 3 +- .../stats/IndicesStatsRequestBuilder.java | 3 +- .../stats/TransportIndicesStatsAction.java | 7 ++- .../DeleteIndexTemplateRequestBuilder.java | 3 +- .../TransportDeleteIndexTemplateAction.java | 35 ++++++----- .../get/GetIndexTemplatesRequestBuilder.java | 6 +- .../get/TransportGetIndexTemplatesAction.java | 9 ++- .../put/TransportPutIndexTemplateAction.java | 9 ++- .../upgrade/get/IndexUpgradeStatus.java | 5 +- .../get/TransportUpgradeStatusAction.java | 13 ++-- .../get/UpgradeStatusRequestBuilder.java | 3 +- .../upgrade/post/TransportUpgradeAction.java | 14 +++-- .../post/TransportUpgradeSettingsAction.java | 14 +++-- .../post/UpgradeSettingsRequestBuilder.java | 3 +-
.../query/TransportValidateQueryAction.java | 9 ++- .../query/ValidateQueryRequestBuilder.java | 3 +- .../indices/TransportAnalyzeActionTests.java | 18 ++++-- .../clear/ClearIndicesCacheBlocksIT.java | 6 +- .../indices/flush/SyncedFlushUnitTests.java | 13 ++-- .../action/admin/indices/get/GetIndexIT.java | 3 +- .../shards/IndicesShardStoreRequestIT.java | 9 ++- .../IndicesShardStoreResponseTests.java | 40 +++++++++---- 59 files changed, 372 insertions(+), 245 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 592c1512d60cf..1297b305ea0c4 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -78,58 +78,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @@ -371,12 +319,6 @@ - - - - - - diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index d6ecaf8b2c9f3..c0753899bc048 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -61,7 +61,8 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, + final ActionListener listener) { //Expand the indices names List actions = request.aliasActions(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 6b77b9a39e97e..998b49623e5ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -36,8 +36,10 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction> extends MasterNodeReadOperationRequestBuilder { +public abstract class BaseAliasesRequestBuilder> + extends MasterNodeReadOperationRequestBuilder { public BaseAliasesRequestBuilder(ElasticsearchClient client, Action action, String... 
aliases) { super(client, action, new GetAliasesRequest(aliases)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 2b71e85a53761..faa075afca8fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -40,8 +40,10 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction tokenFilterFactoryList = - parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true); + parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, + new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true); analyzer = new CustomAnalyzer("keyword_for_normalizer", keywordTokenizerFactory, @@ -311,7 +315,8 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy TokenFilterFactory[] tokenFilterFactories = customAnalyzer.tokenFilters(); String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length]; - TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0]; + TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? + tokenFilterFactories.length : 0]; TokenListCreator tokenizerTokenListCreator = new TokenListCreator(maxTokenCount); @@ -348,14 +353,18 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy } } - DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length]; + DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = + new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length]; + if (charFilterFactories != null) { for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) { charFilteredLists[charFilterIndex] = new DetailAnalyzeResponse.CharFilteredText( charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]); } } - DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; + DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = + new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; + if (tokenFilterFactories != null) { for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) { tokenFilterLists[tokenFilterIndex] = new DetailAnalyzeResponse.AnalyzeTokenList( @@ -382,7 +391,9 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy return detailResponse; } - private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, int current) { + private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, + TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, + int current) { Reader reader = new StringReader(source); for (CharFilterFactory 
charFilterFactory : charFilterFactories) { reader = charFilterFactory.create(reader); @@ -457,7 +468,8 @@ private void analyze(TokenStream stream, Analyzer analyzer, String field, Set extractExtendedAttributes(TokenStream stream, return extendedAttributes; } - private static List parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, - Environment environment, boolean normalizer) throws IOException { + private static List parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + AnalysisRegistry analysisRegistry, Environment environment, + boolean normalizer) throws IOException { List charFilterFactoryList = new ArrayList<>(); if (request.charFilters() != null && request.charFilters().size() > 0) { List charFilters = request.charFilters(); @@ -536,7 +549,8 @@ private static List parseCharFilterFactories(AnalyzeRequest r throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]"); } // Need to set anonymous "name" of char_filter - charFilterFactory = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter", settings); + charFilterFactory = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter", + settings); } else { AnalysisModule.AnalysisProvider charFilterFactoryFactory; if (indexSettings == null) { @@ -608,9 +622,11 @@ public TokenFilterFactory apply(String s) { } } - private static List parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, - Environment environment, Tuple tokenizerFactory, - List charFilterFactoryList, boolean normalizer) throws IOException { + private static List parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + AnalysisRegistry analysisRegistry, Environment environment, + Tuple tokenizerFactory, + List charFilterFactoryList, + boolean normalizer) throws IOException { List tokenFilterFactoryList = new ArrayList<>(); DeferredTokenFilterRegistry deferredRegistry = new DeferredTokenFilterRegistry(analysisRegistry, indexSettings); if (request.tokenFilters() != null && request.tokenFilters().size() > 0) { @@ -630,7 +646,8 @@ private static List parseTokenFilterFactories(AnalyzeRequest throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]"); } // Need to set anonymous "name" of tokenfilter - tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", settings); + tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", + settings); tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList, tokenFilterFactoryList, deferredRegistry); @@ -650,8 +667,8 @@ private static List parseTokenFilterFactories(AnalyzeRequest Settings settings = AnalysisRegistry.getSettingsFromIndexSettings(indexSettings, AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." 
+ tokenFilter.name); tokenFilterFactory = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name, settings); - tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList, - tokenFilterFactoryList, deferredRegistry); + tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), + charFilterFactoryList, tokenFilterFactoryList, deferredRegistry); } } if (tokenFilterFactory == null) { @@ -709,7 +726,8 @@ private static Tuple parseTokenizerFactory(AnalyzeRequ return new Tuple<>(name, tokenizerFactory); } - private static TokenizerFactory getTokenizerFactory(AnalysisRegistry analysisRegistry, Environment environment, String name) throws IOException { + private static TokenizerFactory getTokenizerFactory(AnalysisRegistry analysisRegistry, Environment environment, + String name) throws IOException { AnalysisModule.AnalysisProvider tokenizerFactoryFactory; TokenizerFactory tokenizerFactory; tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(name); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index 8cfe3d7b9096c..7a2bd9fd0f484 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder { +public class ClearIndicesCacheRequestBuilder + extends BroadcastOperationRequestBuilder { public ClearIndicesCacheRequestBuilder(ElasticsearchClient client, ClearIndicesCacheAction action) { super(client, action, new ClearIndicesCacheRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f2e284656e590..c612beea59520 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -58,7 +58,8 @@ public TransportCloseIndexAction(Settings settings, TransportService transportSe ThreadPool threadPool, MetaDataIndexStateService indexStateService, ClusterSettings clusterSettings, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new); + super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + CloseIndexRequest::new); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings); @@ -84,18 +85,21 @@ protected AcknowledgedResponse newResponse() { protected void doExecute(Task task, CloseIndexRequest request, ActionListener listener) { 
destructiveOperations.failDestructive(request.indices()); if (closeIndexEnabled == false) { - throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); + throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); } super.doExecute(task, request, listener); } @Override protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, + indexNameExpressionResolver.concreteIndexNames(state, request)); } @Override - protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) { + protected void masterOperation(final CloseIndexRequest request, final ClusterState state, + final ActionListener listener) { final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); if (concreteIndices == null || concreteIndices.length == 0) { listener.onResponse(new AcknowledgedResponse(true)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index d2593e7e94be3..93b4184f958bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -34,7 +34,8 @@ /** * Builder for a create index request */ -public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder { +public class CreateIndexRequestBuilder + extends AcknowledgedRequestBuilder { public CreateIndexRequestBuilder(ElasticsearchClient client, CreateIndexAction action) { super(client, action, new CreateIndexRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index e4384745d36e8..58467b6cc6b62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -44,7 +44,8 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final CreateIndexRequest request, final ClusterState state, + final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; } final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); - final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) + final CreateIndexClusterStateUpdateRequest updateRequest = + new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) 
.settings(request.settings()).mappings(request.mappings()) .aliases(request.aliases()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 376a115b19627..10663a8dece39 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -24,7 +24,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; -public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder { +public class DeleteIndexRequestBuilder + extends AcknowledgedRequestBuilder { public DeleteIndexRequestBuilder(ElasticsearchClient client, DeleteIndexAction action, String... indices) { super(client, action, new DeleteIndexRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 62421da891608..a7080209eca4d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -53,8 +53,10 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, + final ActionListener listener) { final Set concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request))); if (concreteIndices.isEmpty()) { listener.onResponse(new AcknowledgedResponse(true)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 87da4627047ef..39a2ca7ef0a13 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class IndicesExistsRequestBuilder + extends MasterNodeReadOperationRequestBuilder { public IndicesExistsRequestBuilder(ElasticsearchClient client, IndicesExistsAction action, String... 
indices) { super(client, action, new IndicesExistsRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 2310c463581a0..5f486210b617c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -41,8 +41,10 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< @Inject public TransportIndicesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, indexNameExpressionResolver); + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, + indexNameExpressionResolver); } @Override @@ -59,12 +61,15 @@ protected IndicesExistsResponse newResponse() { @Override protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException - IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); + IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), + request.indicesOptions().expandWildcardsClosed()); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, + indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); } @Override - protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener listener) { + protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, + final ActionListener listener) { boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index e63a27bef1818..223e738ad2068 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -40,8 +40,10 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction listener) { + protected void masterOperation(final TypesExistsRequest request, final ClusterState state, + final ActionListener listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java index f73dcdec22406..607a423605bfc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java @@ -27,7 +27,8 @@ * A builder for {@link TypesExistsRequest}. */ @Deprecated -public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class TypesExistsRequestBuilder + extends MasterNodeReadOperationRequestBuilder { /** * @param indices What indices to check for types diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 7df54c1f123a1..35e19967a3e3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -35,13 +35,15 @@ /** * Flush Action. 
*/ -public class TransportFlushAction extends TransportBroadcastReplicationAction { +public class TransportFlushAction + extends TransportBroadcastReplicationAction { @Inject public TransportFlushAction(Settings settings, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportShardFlushAction replicatedFlushAction) { - super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction); + super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, + replicatedFlushAction); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index ed1819a1d2480..344a817fa8b83 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -32,7 +32,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportShardFlushAction extends TransportReplicationAction { +public class TransportShardFlushAction + extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 285ef99a70a0d..29f6891fd4bc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -29,7 +29,8 @@ * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. */ -public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder { +public class ForceMergeRequestBuilder + extends BroadcastOperationRequestBuilder { public ForceMergeRequestBuilder(ElasticsearchClient client, ForceMergeAction action) { super(client, action, new ForceMergeRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 94357575a9f72..621e2b870e90d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -43,7 +43,8 @@ /** * ForceMerge index/indices action. 
  */
-public class TransportForceMergeAction extends TransportBroadcastByNodeAction<ForceMergeRequest, ForceMergeResponse, TransportBroadcastByNodeAction.EmptyResult> {
+public class TransportForceMergeAction
+    extends TransportBroadcastByNodeAction<ForceMergeRequest, ForceMergeResponse, TransportBroadcastByNodeAction.EmptyResult> {
 
     private final IndicesService indicesService;
 
@@ -62,7 +63,9 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException {
     }
 
     @Override
-    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards,
+                                             List<EmptyResult> responses, List<DefaultShardOperationFailedException> shardFailures,
+                                             ClusterState clusterState) {
         return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
index a80ba8bf2ce8d..cbd0539c24485 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java
@@ -25,7 +25,8 @@
 import org.elasticsearch.common.util.ArrayUtils;
 
 /** A helper class to build {@link GetFieldMappingsRequest} objects */
-public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder<GetFieldMappingsRequest, GetFieldMappingsResponse> {
+public class GetFieldMappingsRequestBuilder
+    extends ActionRequestBuilder<GetFieldMappingsRequest, GetFieldMappingsResponse> {
 
     public GetFieldMappingsRequestBuilder(ElasticsearchClient client, GetFieldMappingsAction action, String... indices) {
         super(client, action, new GetFieldMappingsRequest().indices(indices));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
index 7ecb67139539f..f2e49ece9ea14 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java
@@ -22,7 +22,8 @@
 import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder<GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> {
+public class GetMappingsRequestBuilder
+    extends ClusterInfoRequestBuilder<GetMappingsRequest, GetMappingsResponse, GetMappingsRequestBuilder> {
 
     public GetMappingsRequestBuilder(ElasticsearchClient client, GetMappingsAction action, String... indices) {
         super(client, action, new GetMappingsRequest().indices(indices));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
index f9fc5880bbb5b..3ecd814194e1a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java
@@ -58,7 +58,8 @@
 /**
 * Transport action used to retrieve the mappings related to fields that belong to a specific index
 */
-public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAction<GetFieldMappingsIndexRequest, GetFieldMappingsResponse> {
+public class TransportGetFieldMappingsIndexAction
+    extends TransportSingleShardAction<GetFieldMappingsIndexRequest, GetFieldMappingsResponse> {
 
     private static final String ACTION_NAME = GetFieldMappingsAction.NAME + "[index]";
 
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
index 9f29ffe4883bc..e18cd087666bd 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
@@ -49,7 +49,8 @@ public class TransportPutMappingAction extends TransportMasterNodeAction
-    protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<AcknowledgedResponse> listener) {
+    protected void masterOperation(final PutMappingRequest request, final ClusterState state,
+                                   final ActionListener<AcknowledgedResponse> listener) {
         try {
-            final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()};
+            final Index[] concreteIndices = request.getConcreteIndex() == null ?
+                indexNameExpressionResolver.concreteIndices(state, request)
+                : new Index[] {request.getConcreteIndex()};
             PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
                 .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
                 .indices(concreteIndices).type(request.type())
@@ -93,12 +97,14 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
                 @Override
                 public void onFailure(Exception t) {
-                    logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t);
+                    logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]",
+                        concreteIndices, request.type()), t);
                     listener.onFailure(t);
                 }
             });
         } catch (IndexNotFoundException ex) {
-            logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex);
+            logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]",
+                request.indices(), request.type()), ex);
             throw ex;
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
index 1e89244b67644..0c3863e71433b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java
@@ -51,7 +51,8 @@ public TransportOpenIndexAction(Settings settings, TransportService transportSer
                                     ThreadPool threadPool, MetaDataIndexStateService indexStateService, ActionFilters actionFilters,
                                     IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
-        super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new);
+        super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver,
+            OpenIndexRequest::new);
         this.indexStateService = indexStateService;
         this.destructiveOperations = destructiveOperations;
     }
@@ -75,11 +76,13 @@ protected void doExecute(Task task, OpenIndexRequest request, ActionListener
-    protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
+    protected void masterOperation(final OpenIndexRequest request, final ClusterState state,
+                                   final ActionListener<OpenIndexResponse> listener) {
         final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
         if (concreteIndices == null || concreteIndices.length == 0) {
             listener.onResponse(new OpenIndexResponse(true, true));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
index dc0a9adb0753c..eaeeaa6758079 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java
@@ -69,7 +69,9 @@ protected RecoveryState readShardResult(StreamInput in) throws IOException {
     }
 
     @Override
-    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards,
+                                           List<RecoveryState> responses, List<DefaultShardOperationFailedException> shardFailures,
+                                           ClusterState clusterState) {
         Map<String, List<RecoveryState>> shardResponses = new HashMap<>();
         for (RecoveryState recoveryState : responses) {
             if (recoveryState == null) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
index 05a72c044348c..5d8ce537eeea0 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java
@@ -37,14 +37,16 @@
 /**
  * Refresh action.
  */
-public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {
+public class TransportRefreshAction
+    extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {
 
     @Inject
     public TransportRefreshAction(Settings settings, ClusterService clusterService, TransportService transportService,
                                   ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                   TransportShardRefreshAction shardRefreshAction) {
-        super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction);
+        super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters,
+            indexNameExpressionResolver, shardRefreshAction);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java
index 42e18a1fddc0f..94e85a6a73af0 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java
@@ -45,7 +45,8 @@ public class IndexSegments implements Iterable<IndexShardSegments> {
         }
         indexShards = new HashMap<>();
         for (Map.Entry<Integer, List<ShardSegments>> entry : tmpIndexShards.entrySet()) {
-            indexShards.put(entry.getKey(), new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardSegments[entry.getValue().size()])));
+            indexShards.put(entry.getKey(), new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(),
+                entry.getValue().toArray(new ShardSegments[entry.getValue().size()])));
         }
     }
 
@@ -65,4 +66,4 @@ public Map<Integer, IndexShardSegments> getShards() {
     public Iterator<IndexShardSegments> iterator() {
         return indexShards.values().iterator();
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
index ce4a5705168b2..27ec8fb6a716e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java
@@ -22,7 +22,8 @@
 import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesSegmentsRequest, IndicesSegmentResponse, IndicesSegmentsRequestBuilder> {
+public class IndicesSegmentsRequestBuilder
+    extends BroadcastOperationRequestBuilder<IndicesSegmentsRequest, IndicesSegmentResponse, IndicesSegmentsRequestBuilder> {
 
     public IndicesSegmentsRequestBuilder(ElasticsearchClient client, IndicesSegmentsAction action) {
         super(client, action, new IndicesSegmentsRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
index 6b624e6baa792..e50748ed27b4d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java
@@ -41,13 +41,15 @@
 import java.io.IOException;
 import java.util.List;
 
-public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction<IndicesSegmentsRequest, IndicesSegmentResponse, ShardSegments> {
+public class TransportIndicesSegmentsAction
+    extends TransportBroadcastByNodeAction<IndicesSegmentsRequest, IndicesSegmentResponse, ShardSegments> {
 
     private final IndicesService indicesService;
 
     @Inject
     public TransportIndicesSegmentsAction(Settings settings, ClusterService clusterService, TransportService transportService,
-                                          IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                          IndicesService indicesService, ActionFilters actionFilters,
+                                          IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, IndicesSegmentsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
             IndicesSegmentsRequest::new, ThreadPool.Names.MANAGEMENT);
         this.indicesService = indicesService;
@@ -77,8 +79,11 @@ protected ShardSegments readShardResult(StreamInput in) throws IOException {
     }
 
     @Override
-    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
-        return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures);
+    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards,
+                                                 List<ShardSegments> results, List<DefaultShardOperationFailedException> shardFailures,
+                                                 ClusterState clusterState) {
+        return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards,
+            shardFailures);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
index 2fff2eca0c263..c4b58e90e6ebf 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java
@@ -24,7 +24,8 @@
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.util.ArrayUtils;
 
-public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> {
+public class GetSettingsRequestBuilder
+    extends MasterNodeReadOperationRequestBuilder<GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> {
 
     public GetSettingsRequestBuilder(ElasticsearchClient client, GetSettingsAction action, String... indices) {
         super(client, action, new GetSettingsRequest().indices(indices));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
index 0ffc7efa9524c..a758776cd155d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java
@@ -50,7 +50,8 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction
-    protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<AcknowledgedResponse> listener) {
+    protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state,
+                                   final ActionListener<AcknowledgedResponse> listener) {
         final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
         UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
             .indices(concreteIndices)
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
index 834f3ba30148e..af29429785b19 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
@@ -31,7 +31,8 @@
 /**
  * Builder for an update index settings request
 */
-public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<UpdateSettingsRequest, AcknowledgedResponse, UpdateSettingsRequestBuilder> {
+public class UpdateSettingsRequestBuilder
+    extends AcknowledgedRequestBuilder<UpdateSettingsRequest, AcknowledgedResponse, UpdateSettingsRequestBuilder> {
 
     public UpdateSettingsRequestBuilder(ElasticsearchClient client, UpdateSettingsAction action, String... indices) {
         super(client, action, new UpdateSettingsRequest(indices));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
index cf38feae56f13..f44c24144030b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java
@@ -28,7 +28,10 @@
 /**
  * Request builder for {@link IndicesShardStoresRequest}
 */
-public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder<IndicesShardStoresRequest, IndicesShardStoresResponse, IndicesShardStoreRequestBuilder> {
+public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder<
+        IndicesShardStoresRequest,
+        IndicesShardStoresResponse,
+        IndicesShardStoreRequestBuilder> {
 
     public IndicesShardStoreRequestBuilder(ElasticsearchClient client, Action action, String... indices) {
         super(client, action, new IndicesShardStoresRequest(indices));
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
index 72aeb7f757528..d87de21bc48d8 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java
@@ -276,7 +276,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     private ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses;
     private List<Failure> failures;
 
-    public IndicesShardStoresResponse(ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses, List<Failure> failures) {
+    public IndicesShardStoresResponse(ImmutableOpenMap<String, ImmutableOpenIntMap<List<StoreStatus>>> storeStatuses,
+                                      List<Failure> failures) {
         this.storeStatuses = storeStatuses;
         this.failures = failures;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
index 0741965f5e5c9..b64c376140266 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java
@@ -59,17 +59,21 @@
 import java.util.concurrent.ConcurrentLinkedQueue;
 
 /**
- * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific indices
- * and fetches store information from all the nodes using {@link TransportNodesListGatewayStartedShards}
+ * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific
+ * indices and fetches store information from all the nodes using {@link TransportNodesListGatewayStartedShards}
 */
-public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction<IndicesShardStoresRequest, IndicesShardStoresResponse> {
+public class TransportIndicesShardStoresAction
+    extends TransportMasterNodeReadAction<IndicesShardStoresRequest, IndicesShardStoresResponse> {
 
     private final TransportNodesListGatewayStartedShards listShardStoresInfo;
 
     @Inject
-    public TransportIndicesShardStoresAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters,
-                                             IndexNameExpressionResolver indexNameExpressionResolver, TransportNodesListGatewayStartedShards listShardStoresInfo) {
-        super(settings, IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesShardStoresRequest::new, indexNameExpressionResolver);
+    public TransportIndicesShardStoresAction(Settings settings, TransportService transportService, ClusterService clusterService,
+                                             ThreadPool threadPool, ActionFilters actionFilters,
+                                             IndexNameExpressionResolver indexNameExpressionResolver,
+                                             TransportNodesListGatewayStartedShards listShardStoresInfo) {
+        super(settings, IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            IndicesShardStoresRequest::new, indexNameExpressionResolver);
         this.listShardStoresInfo = listShardStoresInfo;
     }
 
@@ -84,7 +88,8 @@ protected IndicesShardStoresResponse newResponse() {
     }
 
     @Override
-    protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener<IndicesShardStoresResponse> listener) {
+    protected void masterOperation(IndicesShardStoresRequest request, ClusterState state,
+                                   ActionListener<IndicesShardStoresResponse> listener) {
         final RoutingTable routingTables = state.routingTable();
         final RoutingNodes routingNodes = state.getRoutingNodes();
         final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
@@ -116,7 +121,8 @@ protected void masterOperation(IndicesShardStoresRequest request, ClusterState s
 
     @Override
     protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) {
-        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
+        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ,
+            indexNameExpressionResolver.concreteIndexNames(state, request));
     }
 
     private class AsyncShardStoresInfoFetches {
@@ -127,7 +133,8 @@ private class AsyncShardStoresInfoFetches {
         private CountDown expectedOps;
         private final Queue<InternalAsyncFetch.Response> fetchResponses;
 
-        AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, Set<ShardId> shardIds, ActionListener<IndicesShardStoresResponse> listener) {
+        AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, Set<ShardId> shardIds,
+                                    ActionListener<IndicesShardStoresResponse> listener) {
             this.nodes = nodes;
             this.routingNodes = routingNodes;
             this.shardIds = shardIds;
@@ -154,7 +161,8 @@ private class InternalAsyncFetch extends AsyncShardFetch
 
             @Override
-            protected synchronized void processAsyncFetch(List<NodeGatewayStartedShards> responses, List<FailedNodeException> failures, long fetchingRound) {
+            protected synchronized void processAsyncFetch(List<NodeGatewayStartedShards> responses, List<FailedNodeException> failures,
+                                                          long fetchingRound) {
                 fetchResponses.add(new Response(shardId, responses, failures));
                 if (expectedOps.countDown()) {
                     finish();
@@ -162,37 +170,46 @@ protected synchronized void processAsyncFetch(List res
 
             void finish() {
-                ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
+                ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>>>
+                    indicesStoreStatusesBuilder = ImmutableOpenMap.builder();
+                java.util.List<IndicesShardStoresResponse.Failure> failureBuilder = new ArrayList<>();
                 for (Response fetchResponse : fetchResponses) {
-                    ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName());
+                    ImmutableOpenIntMap<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexStoreStatuses =
+                        indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName());
                     final ImmutableOpenIntMap.Builder<java.util.List<IndicesShardStoresResponse.StoreStatus>> indexShardsBuilder;
                     if (indexStoreStatuses == null) {
                         indexShardsBuilder = ImmutableOpenIntMap.builder();
                     } else {
                         indexShardsBuilder = ImmutableOpenIntMap.builder(indexStoreStatuses);
                     }
-                    java.util.List<IndicesShardStoresResponse.StoreStatus> storeStatuses = indexShardsBuilder.get(fetchResponse.shardId.id());
+                    java.util.List<IndicesShardStoresResponse.StoreStatus> storeStatuses = indexShardsBuilder
+                        .get(fetchResponse.shardId.id());
                     if (storeStatuses == null) {
                         storeStatuses = new ArrayList<>();
                     }
                     for (NodeGatewayStartedShards response : fetchResponse.responses) {
                         if (shardExistsInNode(response)) {
-                            IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
-                            storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.allocationId(), allocationStatus, response.storeException()));
+                            IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(
+                                fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode());
+                            storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.allocationId(),
+                                allocationStatus, response.storeException()));
                         }
                     }
                     CollectionUtil.timSort(storeStatuses);
                     indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses);
                    indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), indexShardsBuilder.build());
                     for (FailedNodeException failure : fetchResponse.failures) {
-                        failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), failure.getCause()));
+                        failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(),
+                            fetchResponse.shardId.id(), failure.getCause()));
                     }
                 }
-                listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
+                listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(),
+                    Collections.unmodifiableList(failureBuilder)));
             }
 
-            private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
+            private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID,
+                                                                                                DiscoveryNode node) {
                 for (ShardRouting shardRouting : routingNodes.node(node.getId())) {
                     ShardId shardId = shardRouting.shardId();
                     if (shardId.id() == shardID && shardId.getIndexName().equals(index)) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
index d8480519e5def..a36821a4b656a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java
@@ -68,7 +68,8 @@ public Map<Integer, IndexShardStats> getIndexShards() {
         }
         indexShards = new HashMap<>();
         for (Map.Entry<Integer, List<ShardStats>> entry : tmpIndexShards.entrySet()) {
-            indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[entry.getValue().size()])));
+            indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(),
+                entry.getValue().toArray(new ShardStats[entry.getValue().size()])));
         }
         return indexShards;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
index 8e7afe3e7e308..525e6f13a908d 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java
@@ -31,7 +31,8 @@
 * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific
 * stats can be enabled.
 */
-public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
+public class IndicesStatsRequestBuilder
+    extends BroadcastOperationRequestBuilder<IndicesStatsRequest, IndicesStatsResponse, IndicesStatsRequestBuilder> {
 
     public IndicesStatsRequestBuilder(ElasticsearchClient client, IndicesStatsAction action) {
         super(client, action, new IndicesStatsRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
index d09aa58938450..85af9b2d5b64b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java
@@ -82,8 +82,11 @@ protected ShardStats readShardResult(StreamInput in) throws IOException {
     }
 
     @Override
-    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
-        return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
+    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards,
+                                               List<ShardStats> responses, List<DefaultShardOperationFailedException> shardFailures,
+                                               ClusterState clusterState) {
+        return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards,
+            shardFailures);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
index 5f1119f0f0db1..9826404c598d9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
@@ -22,7 +22,8 @@
 import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<DeleteIndexTemplateRequest, AcknowledgedResponse, DeleteIndexTemplateRequestBuilder> {
+public class DeleteIndexTemplateRequestBuilder
+    extends MasterNodeOperationRequestBuilder<DeleteIndexTemplateRequest, AcknowledgedResponse, DeleteIndexTemplateRequestBuilder> {
 
     public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) {
         super(client, action, new DeleteIndexTemplateRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
index 7f9fc03210675..0d17b38d91902 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
@@ -37,7 +37,8 @@
 /**
  * Delete index action.
 */
-public class TransportDeleteIndexTemplateAction extends TransportMasterNodeAction<DeleteIndexTemplateRequest, AcknowledgedResponse> {
+public class TransportDeleteIndexTemplateAction
+    extends TransportMasterNodeAction<DeleteIndexTemplateRequest, AcknowledgedResponse> {
 
     private final MetaDataIndexTemplateService indexTemplateService;
 
@@ -45,7 +46,8 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
     public TransportDeleteIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                               ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService, ActionFilters actionFilters,
                                               IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexTemplateRequest::new);
+        super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            indexNameExpressionResolver, DeleteIndexTemplateRequest::new);
         this.indexTemplateService = indexTemplateService;
     }
 
@@ -66,18 +68,23 @@ protected ClusterBlockException checkBlock(DeleteIndexTemplateRequest request, C
     }
 
     @Override
-    protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener<AcknowledgedResponse> listener) {
-        indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() {
-            @Override
-            public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
-                listener.onResponse(new AcknowledgedResponse(response.acknowledged()));
-            }
+    protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state,
+                                   final ActionListener<AcknowledgedResponse> listener) {
+        indexTemplateService.removeTemplates(
+            new MetaDataIndexTemplateService
+                .RemoveRequest(request.name())
+                .masterTimeout(request.masterNodeTimeout()),
+            new MetaDataIndexTemplateService.RemoveListener() {
+                @Override
+                public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) {
+                    listener.onResponse(new AcknowledgedResponse(response.acknowledged()));
+                }
 
-            @Override
-            public void onFailure(Exception e) {
-                logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
-                listener.onFailure(e);
-            }
-        });
+                @Override
+                public void onFailure(Exception e) {
+                    logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e);
+                    listener.onFailure(e);
+                }
+            });
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
index 5068f11a0d201..58d3587518c09 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java
@@ -21,7 +21,10 @@
 import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetIndexTemplatesRequest, GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> {
+public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder<
+        GetIndexTemplatesRequest,
+        GetIndexTemplatesResponse,
+        GetIndexTemplatesRequestBuilder> {
 
     public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) {
         super(client, action, new GetIndexTemplatesRequest());
@@ -31,3 +34,4 @@ public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTempl
         super(client, action, new GetIndexTemplatesRequest(names));
     }
 }
+
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java
index 82c8bcec9b020..e66969854aa1f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java
@@ -42,8 +42,10 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAct
     @Inject
     public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService,
-                                            ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexTemplatesRequest::new, indexNameExpressionResolver);
+                                            ThreadPool threadPool, ActionFilters actionFilters,
+                                            IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            GetIndexTemplatesRequest::new, indexNameExpressionResolver);
     }
 
     @Override
@@ -62,7 +64,8 @@ protected GetIndexTemplatesResponse newResponse() {
     }
 
     @Override
-    protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener<GetIndexTemplatesResponse> listener) {
+    protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state,
+                                   ActionListener<GetIndexTemplatesResponse> listener) {
         List<IndexTemplateMetaData> results;
 
         // If we did not ask for a specific name, then we return all templates
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
index 34eccbf9d8a40..ae3a799453d67 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
@@ -47,8 +47,10 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction
-    protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener<AcknowledgedResponse> listener) {
+    protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state,
+                                   final ActionListener<AcknowledgedResponse> listener) {
         String cause = request.cause();
         if (cause.length() == 0) {
             cause = "api";
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
index cae0fd6bfa6fe..68054595701ad 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java
@@ -45,7 +45,8 @@ public class IndexUpgradeStatus implements Iterable<IndexShardUpgradeStatus> {
         }
         indexShards = new HashMap<>();
         for (Map.Entry<Integer, List<ShardUpgradeStatus>> entry : tmpIndexShards.entrySet()) {
-            indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()])));
+            indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(),
+                entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()])));
         }
     }
 
@@ -91,4 +92,4 @@ public long getToUpgradeBytesAncient() {
     }
 
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
index 603b25f6ab414..2958538f83bc9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java
@@ -43,13 +43,15 @@
 import java.io.IOException;
 import java.util.List;
 
-public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction<UpgradeStatusRequest, UpgradeStatusResponse, ShardUpgradeStatus> {
+public class TransportUpgradeStatusAction
+    extends TransportBroadcastByNodeAction<UpgradeStatusRequest, UpgradeStatusResponse, ShardUpgradeStatus> {
 
     private final IndicesService indicesService;
 
     @Inject
     public TransportUpgradeStatusAction(Settings settings, ClusterService clusterService, TransportService transportService,
-                                        IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                        IndicesService indicesService, ActionFilters actionFilters,
+                                        IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, UpgradeStatusAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
             UpgradeStatusRequest::new, ThreadPool.Names.MANAGEMENT);
         this.indicesService = indicesService;
@@ -79,8 +81,11 @@ protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException
     }
 
     @Override
-    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
-        return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
+    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards,
+                                                List<ShardUpgradeStatus> responses,
+                                                List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
+        return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards,
+            failedShards, shardFailures);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
index cee5bdcabe59d..e359b191ffdba 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java
@@ -22,7 +22,8 @@
 import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
-public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder<UpgradeStatusRequest, UpgradeStatusResponse, UpgradeStatusRequestBuilder> {
+public class UpgradeStatusRequestBuilder
+    extends BroadcastOperationRequestBuilder<UpgradeStatusRequest, UpgradeStatusResponse, UpgradeStatusRequestBuilder> {
 
     public UpgradeStatusRequestBuilder(ElasticsearchClient client, UpgradeStatusAction action) {
         super(client, action, new UpgradeStatusRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
index 918b8a06056d2..c44ad9e70bae7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java
@@ -66,13 +66,16 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction
-    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards,
+                                          List<ShardUpgradeResult> shardUpgradeResults,
+                                          List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         Map<String, Integer> successfulPrimaryShards = new HashMap<>();
         Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
         for (ShardUpgradeResult result : shardUpgradeResults) {
@@ -111,8 +114,8 @@ protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, i
             if (primaryCount == metaData.index(index).getNumberOfShards()) {
                 updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString()));
             } else {
-                logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - expected[{}], received[{}]", index,
-                    expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);
+                logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - " +
+                    "expected[{}], received[{}]", index, expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);
             }
         }
 
@@ -152,7 +155,8 @@ protected ShardsIterator shards(ClusterState clusterState, UpgradeRequest reques
             return iterator;
         }
         // If some primary shards are not available the request should fail.
-        throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards " + indicesWithMissingPrimaries);
+        throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards "
+            + indicesWithMissingPrimaries);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
index 7c4aa406b2101..ff68a3e88a469 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
@@ -41,9 +41,11 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction
-    protected void masterOperation(final UpgradeSettingsRequest request, final ClusterState state, final ActionListener<AcknowledgedResponse> listener) {
+    protected void masterOperation(final UpgradeSettingsRequest request, final ClusterState state,
+                                   final ActionListener<AcknowledgedResponse> listener) {
         UpgradeSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpgradeSettingsClusterStateUpdateRequest()
             .ackTimeout(request.timeout())
             .versions(request.versions())
@@ -78,7 +81,8 @@ public void onResponse(ClusterStateUpdateResponse response) {
 
             @Override
             public void onFailure(Exception t) {
-                logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t);
+                logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]",
+                    request.versions().keySet()), t);
                 listener.onFailure(t);
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java
index e3a48066bbfe0..853077a67a752 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java
@@ -30,7 +30,8 @@
 /**
  * Builder for an update index settings request
 */
-public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder<UpgradeSettingsRequest, AcknowledgedResponse, UpgradeSettingsRequestBuilder> {
+public class UpgradeSettingsRequestBuilder
+    extends AcknowledgedRequestBuilder<UpgradeSettingsRequest, AcknowledgedResponse, UpgradeSettingsRequestBuilder> {
 
     public UpgradeSettingsRequestBuilder(ElasticsearchClient client, UpgradeSettingsAction action) {
         super(client, action, new UpgradeSettingsRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
index 2b3c8a7bbcc33..5c13c1cde28db 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java
@@ -59,7 +59,11 @@
 import java.util.concurrent.atomic.AtomicReferenceArray;
 import java.util.function.LongSupplier;
 
-public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
+public class TransportValidateQueryAction extends TransportBroadcastAction<
+        ValidateQueryRequest,
+        ValidateQueryResponse,
+        ShardValidateQueryRequest,
+        ShardValidateQueryResponse> {
 
     private final SearchService searchService;
 
@@ -146,7 +150,8 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, ValidateQu
     }
 
     @Override
-    protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+    protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses,
+                                                ClusterState clusterState) {
         int successfulShards = 0;
         int failedShards = 0;
         boolean valid = true;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
index bd8067e05cb9f..bf34f8b27b4fa 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java
@@ -23,7 +23,8 @@
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.index.query.QueryBuilder;
 
-public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
+public class ValidateQueryRequestBuilder
+    extends BroadcastOperationRequestBuilder<ValidateQueryRequest, ValidateQueryResponse, ValidateQueryRequestBuilder> {
 
     public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) {
         super(client, action, new ValidateQueryRequest());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
index c0404a47ab237..b0c2e34c30620 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java
@@ -148,7 +148,8 @@ public void testNoIndexAnalyzers() throws IOException {
         request.text("the qu1ck brown fox");
         request.tokenizer("standard");
         request.addTokenFilter("mock");
-        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount);
+        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment,
+            maxTokenCount);
         tokens = analyze.getTokens();
         assertEquals(3, tokens.size());
         assertEquals("qu1ck", tokens.get(0).getTerm());
@@ -160,7 +161,8 @@ public void testNoIndexAnalyzers() throws IOException {
         request.text("the qu1ck brown fox");
         request.tokenizer("standard");
         request.addCharFilter("append_foo");
-        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount);
+        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment,
+            maxTokenCount);
         tokens = analyze.getTokens();
         assertEquals(4, tokens.size());
         assertEquals("the", tokens.get(0).getTerm());
@@ -174,7 +176,8 @@ public void testNoIndexAnalyzers() throws IOException {
         request.tokenizer("standard");
         request.addCharFilter("append");
         request.text("the qu1ck brown fox");
-        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount);
+        analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment,
+            maxTokenCount);
         tokens = analyze.getTokens();
         assertEquals(4, tokens.size());
         assertEquals("the", tokens.get(0).getTerm());
@@ -219,7 +222,8 @@ public void testWithIndexAnalyzers() throws IOException {
         AnalyzeRequest request = new AnalyzeRequest();
         request.text("the quick brown fox");
         request.analyzer("custom_analyzer");
-        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
+        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment,
+            maxTokenCount);
         List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
         assertEquals(3, tokens.size());
         assertEquals("quick", tokens.get(0).getTerm());
@@ -333,7 +337,8 @@ public void testNonPreBuildTokenFilter() throws IOException {
         request.tokenizer("standard");
         request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters()
         request.text("the quick brown fox");
-        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
+        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment,
+            maxTokenCount);
         List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
         assertEquals(3, tokens.size());
         assertEquals("quick", tokens.get(0).getTerm());
@@ -345,7 +350,8 @@ public void testNormalizerWithIndex() throws IOException {
         AnalyzeRequest request = new AnalyzeRequest("index");
         request.normalizer("my_normalizer");
         request.text("ABc");
-        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
+        AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment,
+            maxTokenCount);
         List<AnalyzeResponse.AnalyzeToken> tokens = analyze.getTokens();
         assertEquals(1, tokens.size());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
index ee1f4dd24e2f4..c4454adebb8e1 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java
@@ -45,7 +45,8 @@ public void testClearIndicesCacheWithBlocks() {
         for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
             try {
                 enableIndexBlock("test", blockSetting);
-                ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet();
+                ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test")
+                    .setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet();
                 assertNoFailures(clearIndicesCacheResponse);
                 assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
             } finally {
@@ -56,7 +57,8 @@ public void testClearIndicesCacheWithBlocks() {
         for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) {
             try {
                 enableIndexBlock("test", blockSetting);
-                assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true));
+                assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true)
+                    .setFieldDataCache(true));
             } finally {
                 disableIndexBlock("test", blockSetting);
             }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java
index 7040c92ec1d27..f6ca1c4f742a0 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java
@@ -107,7 +107,8 @@ public void testResponseStreaming() throws IOException {
             assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId()));
             assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards()));
             assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size()));
-            for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.failedShards().entrySet()) {
+            for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry
+                : originalShardResult.failedShards().entrySet()) {
                 SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey());
                 assertNotNull(readShardResponse);
                 SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
@@ -115,8 +116,10 @@ public void testResponseStreaming() throws IOException {
                 assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
             }
             assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size()));
-            for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : originalShardResult.shardResponses().entrySet()) {
-                SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey());
+            for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry
+                : originalShardResult.shardResponses().entrySet()) {
+                SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses()
+                    .get(shardEntry.getKey());
                 assertNotNull(readShardResponse);
                 SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
                 assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
@@ -157,8 +160,8 @@ protected TestPlan createTestPlan() {
             } else {
                 Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
                 for (int copy = 0; copy < replicas + 1; copy++) {
-                    final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null,
-                        copy == 0, ShardRoutingState.STARTED);
+                    final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy,
+                        null, copy == 0, ShardRoutingState.STARTED);
                     if (randomInt(5) < 2) {
                         // shard copy failure
                         failed++;
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
index 48914fca13133..91479e4bfe192 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
@@ -197,7 +197,8 @@ public void testGetIndexWithBlocks() {
         try {
             enableIndexBlock("idx", SETTING_BLOCKS_METADATA);
-            assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK);
+            assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES),
+                INDEX_METADATA_BLOCK);
         } finally {
             disableIndexBlock("idx", SETTING_BLOCKS_METADATA);
         }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
index 103e7db07a9fb..70d45d1db3ac6 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java
@@ -117,7 +117,8 @@ public void testBasic() throws Exception {
         assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size()));
         for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> storesStatus : shardStoresStatuses) {
             assertThat("must report for one store", storesStatus.value.size(), equalTo(1));
-            assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY));
+            assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(),
+                equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY));
         }
         logger.info("--> enable allocation");
         enableAllocation(index);
@@ -136,8 +137,10 @@ public void testIndices() throws Exception {
         indexRandomData(index1);
         indexRandomData(index2);
         ensureGreen();
-        IndicesShardStoresResponse response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest().shardStatuses("all")).get();
-        ImmutableOpenMap<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses();
+        IndicesShardStoresResponse response = client().admin().indices()
+            .shardStores(Requests.indicesShardStoresRequest().shardStatuses("all")).get();
+        ImmutableOpenMap<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>>
+            shardStatuses = response.getStoreStatuses();
         assertThat(shardStatuses.containsKey(index1), equalTo(true));
         assertThat(shardStatuses.containsKey(index2), equalTo(true));
         assertThat(shardStatuses.get(index1).size(), equalTo(2));
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
index 661f47b38a8a4..2a2f32e6894dd 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java
@@ -46,16 +46,21 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
 
     public void testBasicSerialization() throws Exception {
-        ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> indexStoreStatuses = ImmutableOpenMap.builder();
+        ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>>
+            indexStoreStatuses = ImmutableOpenMap.builder();
+        List<IndicesShardStoresResponse.Failure> failures = new ArrayList<>();
         ImmutableOpenIntMap.Builder<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = ImmutableOpenIntMap.builder();
         DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         List<IndicesShardStoresResponse.StoreStatus> storeStatusList = new ArrayList<>();
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted")));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, null,
+            IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, UUIDs.randomBase64UUID(),
+            IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(),
+            IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted")));
         storeStatuses.put(0, storeStatusList);
         storeStatuses.put(1, storeStatusList);
         ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storesMap = storeStatuses.build();
@@ -64,7 +69,8 @@ public void testBasicSerialization() throws Exception {
 
         failures.add(new IndicesShardStoresResponse.Failure("node1", "test", 3, new NodeDisconnectedException(node1, "")));
 
-        IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), Collections.unmodifiableList(failures));
+        IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(),
+            Collections.unmodifiableList(failures));
         XContentBuilder contentBuilder = XContentFactory.jsonBuilder();
         contentBuilder.startObject();
         storesResponse.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS);
@@ -117,14 +123,22 @@ public void testBasicSerialization() throws Exception {
 
     public void testStoreStatusOrdering() throws Exception {
         DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         List<IndicesShardStoresResponse.StoreStatus> orderedStoreStatuses = new ArrayList<>();
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(),
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); Collections.shuffle(storeStatuses, random()); From 1b879ea8ac3a4feeb88b4be6cbe285fa16253fa4 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 26 Oct 2018 16:26:45 +0200 Subject: [PATCH 12/14] Refactor children aggregator into a generic ParentJoinAggregator (#34845) This commit adds a new ParentJoinAggregator that implements a join using global ordinals in a way that can be reused by the `children` and the upcoming `parent` aggregation. This new aggregator is a refactor of the existing ParentToChildrenAggregator with two main changes: * It uses a dense bit array instead of a long array when the aggregation does not have any parent. * It uses a single aggregator per bucket if it is nested under another aggregation. For the latter case we use a `MultiBucketAggregatorWrapper` in the factory in order to ensure that each instance of the aggregator handles a single bucket. This is more in line with the strategy we use for other aggregations, like the `terms` aggregation, since the number of buckets to handle should be low (thanks to the breadth_first strategy). This change is also required for #34210 which adds the `parent` aggregation in the parent-join module.
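A rough sketch of the two collection strategies described above (illustrative only, not code from this PR): plain JDK BitSet/HashSet stand in for the BigArrays-backed BitArray and LongHash, and the OrdinalSet names are invented for this example.

    import java.util.BitSet;
    import java.util.HashSet;
    import java.util.Set;

    interface OrdinalSet {
        void add(int globalOrd);
        boolean contains(int globalOrd);
    }

    // Top-level (single bucket): one bit per possible parent ordinal, so
    // memory stays bounded by maxOrd bits however many ordinals match.
    final class DenseOrdinalSet implements OrdinalSet {
        private final BitSet bits;
        DenseOrdinalSet(int maxOrd) { bits = new BitSet(maxOrd); }
        @Override public void add(int globalOrd) { bits.set(globalOrd); }
        @Override public boolean contains(int globalOrd) { return bits.get(globalOrd); }
    }

    // Nested under another aggregation: each per-bucket instance sees only
    // a few ordinals, so a hash set beats a dense bitmap per bucket.
    final class SparseOrdinalSet implements OrdinalSet {
        private final Set<Integer> ords = new HashSet<>();
        @Override public void add(int globalOrd) { ords.add(globalOrd); }
        @Override public boolean contains(int globalOrd) { return ords.contains(globalOrd); }
    }

    final class StrategyChoice {
        // Mirrors the aggregator's constructor logic: dense when there is no
        // parent aggregator, sparse when wrapped one-instance-per-bucket.
        static OrdinalSet choose(boolean hasParentAggregator, int maxOrd) {
            return hasParentAggregator ? new SparseOrdinalSet() : new DenseOrdinalSet(maxOrd);
        }
    }

The factory-level decision below mirrors this split: when the aggregator must collect from multiple buckets it is wrapped via asMultiBucketAggregator, otherwise a single dense instance is created.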
Relates #34508 --- .../ChildrenAggregatorFactory.java | 38 ++-- .../aggregations/ParentJoinAggregator.java | 173 ++++++++++++++++++ .../ParentToChildrenAggregator.java | 143 +-------------- .../composite => common/util}/BitArray.java | 6 +- .../bucket/composite/DoubleValuesSource.java | 3 +- .../bucket/composite/LongValuesSource.java | 3 +- .../util}/BitArrayTests.java | 5 +- 7 files changed, 209 insertions(+), 162 deletions(-) create mode 100644 modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java rename server/src/main/java/org/elasticsearch/{search/aggregations/bucket/composite => common/util}/BitArray.java (92%) rename server/src/test/java/org/elasticsearch/{search/aggregations/bucket/composite => common/util}/BitArrayTests.java (90%) diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java index 9c38fa2eae6b9..1f466f1020d18 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java @@ -35,39 +35,49 @@ import java.util.List; import java.util.Map; -public class ChildrenAggregatorFactory - extends ValuesSourceAggregatorFactory { +public class ChildrenAggregatorFactory extends ValuesSourceAggregatorFactory { private final Query parentFilter; private final Query childFilter; - public ChildrenAggregatorFactory(String name, ValuesSourceConfig config, - Query childFilter, Query parentFilter, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + public ChildrenAggregatorFactory(String name, + ValuesSourceConfig config, + Query childFilter, + Query parentFilter, + SearchContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); + this.childFilter = childFilter; this.parentFilter = parentFilter; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { - @Override public InternalAggregation buildEmptyAggregation() { return new InternalChildren(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); } - }; } @Override - protected Aggregator doCreateInternal(WithOrdinals valuesSource, Aggregator parent, - boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator doCreateInternal(WithOrdinals valuesSource, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + long maxOrd = valuesSource.globalMaxOrd(context.searcher()); - return new ParentToChildrenAggregator(name, factories, context, parent, childFilter, - parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + if (collectsFromSingleBucket) { + return new ParentToChildrenAggregator(name, factories, context, parent, childFilter, + parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + } else { + return 
asMultiBucketAggregator(this, context, parent); + } } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java new file mode 100644 index 0000000000000..46e358319a28a --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * An aggregator that joins documents based on global ordinals. + * Global ordinals that match the main query and the inFilter query are replayed + * with documents matching the outFilter query. + */ +public abstract class ParentJoinAggregator extends BucketsAggregator implements SingleBucketAggregator { + private final Weight inFilter; + private final Weight outFilter; + private final ValuesSource.Bytes.WithOrdinals valuesSource; + private final boolean singleAggregator; + + /** + * If this aggregator is nested under another aggregator we allocate a long hash per bucket. + */ + private final LongHash ordsHash; + /** + * Otherwise we use a dense bit array to record the global ordinals. 
+ */ + private final BitArray ordsBit; + + public ParentJoinAggregator(String name, + AggregatorFactories factories, + SearchContext context, + Aggregator parent, + Query inFilter, + Query outFilter, + ValuesSource.Bytes.WithOrdinals valuesSource, + long maxOrd, + List pipelineAggregators, + Map metaData) throws IOException { + super(name, factories, context, parent, pipelineAggregators, metaData); + + if (maxOrd > Integer.MAX_VALUE) { + throw new IllegalStateException("the number of parent [" + maxOrd + "] is greater than the allowed limit " + + "for this aggregation: " + Integer.MAX_VALUE); + } + + // these two filters are cached in the parser + this.inFilter = context.searcher().createWeight(context.searcher().rewrite(inFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.outFilter = context.searcher().createWeight(context.searcher().rewrite(outFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.valuesSource = valuesSource; + this.singleAggregator = parent == null; + this.ordsBit = singleAggregator ? new BitArray((int) maxOrd, context.bigArrays()) : null; + this.ordsHash = singleAggregator ? null : new LongHash(1, context.bigArrays()); + } + + private void addGlobalOrdinal(int globalOrdinal) { + if (singleAggregator) { + ordsBit.set(globalOrdinal); + } else { + ordsHash.add(globalOrdinal); + } + } + + private boolean existsGlobalOrdinal(int globalOrdinal) { + return singleAggregator ? ordsBit.get(globalOrdinal) : ordsHash.find(globalOrdinal) >= 0; + } + + @Override + public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, + final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); + final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), inFilter.scorerSupplier(ctx)); + return new LeafBucketCollector() { + @Override + public void collect(int docId, long bucket) throws IOException { + assert bucket == 0; + if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) { + int globalOrdinal = (int) globalOrdinals.nextOrd(); + assert globalOrdinal != -1 && globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; + addGlobalOrdinal(globalOrdinal); + } + } + }; + } + + @Override + protected final void doPostCollection() throws IOException { + IndexReader indexReader = context().searcher().getIndexReader(); + for (LeafReaderContext ctx : indexReader.leaves()) { + Scorer childDocsScorer = outFilter.scorer(ctx); + if (childDocsScorer == null) { + continue; + } + DocIdSetIterator childDocsIter = childDocsScorer.iterator(); + + final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); + + final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); + // Set the scorer, since we now replay only the child docIds + sub.setScorer(new Scorable() { + @Override + public float score() { + return 1f; + } + + @Override + public int docID() { + return childDocsIter.docID(); + } + }); + + final Bits liveDocs = ctx.reader().getLiveDocs(); + for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } + if (globalOrdinals.advanceExact(docId)) { + int globalOrdinal = (int) globalOrdinals.nextOrd(); + assert globalOrdinal != -1 && globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; + if (existsGlobalOrdinal(globalOrdinal)) { + 
collectBucket(sub, docId, 0); + } + } + } + } + } + + @Override + protected void doClose() { + Releasables.close(ordsBit, ordsHash); + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java index 064d1d1e5977c..3990e8697ef63 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java @@ -18,73 +18,28 @@ */ package org.elasticsearch.join.aggregations; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.Bits; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.util.LongArray; -import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; -import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Map; -// The RecordingPerReaderBucketCollector assumes per segment recording which isn't the case for this -// aggregation, for this reason that collector can't be used -public class ParentToChildrenAggregator extends BucketsAggregator implements SingleBucketAggregator { +public class ParentToChildrenAggregator extends ParentJoinAggregator { static final ParseField TYPE_FIELD = new ParseField("type"); - private final Weight childFilter; - private final Weight parentFilter; - private final ValuesSource.Bytes.WithOrdinals valuesSource; - - // Maybe use PagedGrowableWriter? This will be less wasteful than LongArray, - // but then we don't have the reuse feature of BigArrays. 
- // Also if we know the highest possible value that a parent agg will create - // then we store multiple values into one slot - private final LongArray parentOrdToBuckets; - - // Only pay the extra storage price if the a parentOrd has multiple buckets - // Most of the times a parent doesn't have multiple buckets, since there is - // only one document per parent ord, - // only in the case of terms agg if a parent doc has multiple terms per - // field this is needed: - private final LongObjectPagedHashMap parentOrdToOtherBuckets; - private boolean multipleBucketsPerParentOrd = false; - public ParentToChildrenAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, Query childFilter, Query parentFilter, ValuesSource.Bytes.WithOrdinals valuesSource, - long maxOrd, List pipelineAggregators, Map metaData) - throws IOException { - super(name, factories, context, parent, pipelineAggregators, metaData); - // these two filters are cached in the parser - this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); - this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); - this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false); - this.parentOrdToBuckets.fill(0, maxOrd, -1); - this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays()); - this.valuesSource = valuesSource; + long maxOrd, List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, context, parent, parentFilter, childFilter, valuesSource, maxOrd, pipelineAggregators, metaData); } @Override @@ -99,96 +54,4 @@ public InternalAggregation buildEmptyAggregation() { metaData()); } - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, - final LeafBucketCollector sub) throws IOException { - if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; - } - final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); - final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentFilter.scorerSupplier(ctx)); - return new LeafBucketCollector() { - - @Override - public void collect(int docId, long bucket) throws IOException { - if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) { - long globalOrdinal = globalOrdinals.nextOrd(); - assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; - if (globalOrdinal != -1) { - if (parentOrdToBuckets.get(globalOrdinal) == -1) { - parentOrdToBuckets.set(globalOrdinal, bucket); - } else { - long[] bucketOrds = parentOrdToOtherBuckets.get(globalOrdinal); - if (bucketOrds != null) { - bucketOrds = Arrays.copyOf(bucketOrds, bucketOrds.length + 1); - bucketOrds[bucketOrds.length - 1] = bucket; - parentOrdToOtherBuckets.put(globalOrdinal, bucketOrds); - } else { - parentOrdToOtherBuckets.put(globalOrdinal, new long[] { bucket }); - } - multipleBucketsPerParentOrd = true; - } - } - } - } - }; - } - - @Override - protected void doPostCollection() throws IOException { - IndexReader indexReader = context().searcher().getIndexReader(); - for (LeafReaderContext ctx : indexReader.leaves()) { - Scorer childDocsScorer = childFilter.scorer(ctx); - if (childDocsScorer == null) { - continue; - } - DocIdSetIterator childDocsIter = childDocsScorer.iterator(); - - final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); - - final SortedSetDocValues 
globalOrdinals = valuesSource.globalOrdinalsValues(ctx); - // Set the scorer, since we now replay only the child docIds - sub.setScorer(new Scorable() { - @Override - public float score() { - return 1f; - } - - @Override - public int docID() { - return childDocsIter.docID(); - } - }); - - final Bits liveDocs = ctx.reader().getLiveDocs(); - for (int docId = childDocsIter - .nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter - .nextDoc()) { - if (liveDocs != null && liveDocs.get(docId) == false) { - continue; - } - if (globalOrdinals.advanceExact(docId)) { - long globalOrdinal = globalOrdinals.nextOrd(); - assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; - long bucketOrd = parentOrdToBuckets.get(globalOrdinal); - if (bucketOrd != -1) { - collectBucket(sub, docId, bucketOrd); - if (multipleBucketsPerParentOrd) { - long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal); - if (otherBucketOrds != null) { - for (long otherBucketOrd : otherBucketOrds) { - collectBucket(sub, docId, otherBucketOrd); - } - } - } - } - } - } - } - } - - @Override - protected void doClose() { - Releasables.close(parentOrdToBuckets, parentOrdToOtherBuckets); - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java rename to server/src/main/java/org/elasticsearch/common/util/BitArray.java index 6b35d7d2e2e0a..54fa4a669de29 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.bucket.composite; +package org.elasticsearch.common.util; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; @@ -30,11 +30,11 @@ * The underlying long array grows lazily based on the biggest index * that needs to be set. 
*/ -final class BitArray implements Releasable { +public final class BitArray implements Releasable { private final BigArrays bigArrays; private LongArray bits; - BitArray(BigArrays bigArrays, int initialSize) { + public BitArray(int initialSize, BigArrays bigArrays) { this.bigArrays = bigArrays; this.bits = bigArrays.newLongArray(initialSize, true); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index d243b0e75924e..633d919f140cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -48,7 +49,7 @@ class DoubleValuesSource extends SingleDimensionValuesSource { DocValueFormat format, boolean missingBucket, int size, int reverseMul) { super(bigArrays, format, fieldType, missingBucket, size, reverseMul); this.docValuesFunc = docValuesFunc; - this.bits = missingBucket ? new BitArray(bigArrays, 100) : null; + this.bits = missingBucket ? new BitArray(100, bigArrays) : null; this.values = bigArrays.newDoubleArray(Math.min(size, 100), false); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 6d5e9f7d6e251..e5ecbd6d00e20 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -61,7 +62,7 @@ class LongValuesSource extends SingleDimensionValuesSource { this.bigArrays = bigArrays; this.docValuesFunc = docValuesFunc; this.rounding = rounding; - this.bits = missingBucket ? new BitArray(bigArrays, Math.min(size, 100)) : null; + this.bits = missingBucket ? 
new BitArray(Math.min(size, 100), bigArrays) : null; this.values = bigArrays.newLongArray(Math.min(size, 100), false); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java similarity index 90% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java rename to server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index 1806080260f28..518bbc08f4cf9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -17,9 +17,8 @@ * under the License. */ -package org.elasticsearch.search.aggregations.bucket.composite; +package org.elasticsearch.common.util; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -28,7 +27,7 @@ public class BitArrayTests extends ESTestCase { public void testRandom() { - try (BitArray bitArray = new BitArray(BigArrays.NON_RECYCLING_INSTANCE, 1)) { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { int numBits = randomIntBetween(1000, 10000); for (int step = 0; step < 3; step++) { boolean[] bits = new boolean[numBits]; From a39a67cd38981c51c974ea6b1f3291bd7970fe9a Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 26 Oct 2018 15:34:48 +0100 Subject: [PATCH 13/14] [ML] Extract common native process base class (#34856) We currently have two different native processes: autodetect & normalizer. There are plans for introducing a new process. All these share many things in common. This commit refactors the processes to extend an `AbstractNativeProcess` class that encapsulates those commonalities with the purpose of reusing the code for new processes in the future. 
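The shape of this refactor can be sketched as a small template-method hierarchy (simplified and hypothetical; the real AbstractNativeProcess additionally manages named pipes, C++ log handlers, and state-persistence futures, as the diff below shows):

    import java.io.Closeable;
    import java.io.IOException;

    abstract class NativeProcessSketch implements Closeable {
        private final String jobId;

        NativeProcessSketch(String jobId) { this.jobId = jobId; }

        final String jobId() { return jobId; }

        // Shared plumbing: every native process writes length-encoded
        // records to the C++ process' input pipe in the same way.
        public final void writeRecord(String[] record) throws IOException {
            // length-encode and forward to the process input stream (omitted)
        }

        @Override
        public void close() throws IOException {
            // close pipes, join reader threads, delete temp files (omitted)
        }

        // Subclasses contribute only what differs between processes.
        public abstract String getName();
    }

    final class AutodetectSketch extends NativeProcessSketch {
        AutodetectSketch(String jobId) { super(jobId); }
        @Override public String getName() { return "autodetect"; }
    }

    final class NormalizerSketch extends NativeProcessSketch {
        NormalizerSketch(String jobId) { super(jobId); }
        @Override public String getName() { return "normalizer"; }
    }

Pulling the common plumbing into one base class is what lets a future third process be added by implementing only the process-specific commands.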
--- .../xpack/core/ml/job/config/Detector.java | 2 +- .../writer/RecordWriter.java | 2 +- .../ml/job/config/AnalysisConfigTests.java | 2 +- .../core/ml/job/config/DetectorTests.java | 2 +- .../xpack/ml/MachineLearning.java | 9 +- .../xpack/ml/MachineLearningFeatureSet.java | 4 +- .../xpack/ml/MlLifeCycleService.java | 4 +- .../process/autodetect/AutodetectBuilder.java | 4 +- .../autodetect/AutodetectCommunicator.java | 2 +- .../process/autodetect/AutodetectProcess.java | 75 +---- .../autodetect/AutodetectProcessManager.java | 2 +- .../BlackHoleAutodetectProcess.java | 2 +- .../autodetect/NativeAutodetectProcess.java | 240 ++-------------- .../NativeAutodetectProcessFactory.java | 8 +- ...sor.java => AutodetectStateProcessor.java} | 28 +- .../writer/AbstractDataToProcessWriter.java | 1 + .../writer/ControlMsgToProcessWriter.java | 3 +- .../autodetect/writer/CsvRecordWriter.java | 2 +- .../MultiplyingNormalizerProcess.java | 45 ++- .../normalizer/NativeNormalizerProcess.java | 91 +----- .../NativeNormalizerProcessFactory.java | 31 +- .../process/normalizer/NormalizerProcess.java | 28 +- .../output/NormalizerResultHandler.java | 7 +- .../ml/process/AbstractNativeProcess.java | 265 ++++++++++++++++++ .../{job => }/process/NativeController.java | 4 +- .../process/NativeControllerHolder.java | 2 +- .../xpack/ml/process/NativeProcess.java | 85 ++++++ .../process/NativeStorageProvider.java | 2 +- .../ml/{job => }/process/ProcessPipes.java | 2 +- .../xpack/ml/process/StateProcessor.java | 14 + .../process/logging/CppLogMessage.java | 2 +- .../process/logging/CppLogMessageHandler.java | 2 +- .../writer/LengthEncodedWriter.java | 4 +- .../autodetect/AutodetectBuilderTests.java | 4 +- .../NativeAutodetectProcessTests.java | 12 +- ...ava => AutodetectStateProcessorTests.java} | 25 +- .../AbstractDataToProcessWriterTests.java | 1 + .../ControlMsgToProcessWriterTests.java | 1 + .../process/normalizer/NormalizerTests.java | 7 +- .../output/NormalizerResultHandlerTests.java | 3 +- .../process/NativeControllerTests.java | 2 +- .../process/NativeStorageProviderTests.java | 9 +- .../{job => }/process/ProcessPipesTests.java | 2 +- .../logging/CppLogMessageHandlerTests.java | 4 +- .../process/logging/CppLogMessageTests.java | 4 +- .../writer/LengthEncodedWriterTests.java | 2 +- 46 files changed, 560 insertions(+), 492 deletions(-) rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/{job/process/autodetect => process}/writer/RecordWriter.java (93%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/{StateProcessor.java => AutodetectStateProcessor.java} (82%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeController.java (98%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeControllerHolder.java (97%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeStorageProvider.java (98%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/ProcessPipes.java (99%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessage.java (99%) rename 
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageHandler.java (99%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job/process/autodetect => process}/writer/LengthEncodedWriter.java (95%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/{StateProcessorTests.java => AutodetectStateProcessorTests.java} (88%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/NativeControllerTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/NativeStorageProviderTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/ProcessPipesTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageHandlerTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageTests.java (98%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job/process/autodetect => process}/writer/LengthEncodedWriterTests.java (99%) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index b6275c6e0579a..d53e4cb74126d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java similarity index 93% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java index 61b904246d50f..b66fd948a5a83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.core.ml.job.process.autodetect.writer; +package org.elasticsearch.xpack.core.ml.process.writer; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java index d691124a90a43..8843a336bde3d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java index 2f7eab0e97c70..fe546a371816d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index cdd3af133f6dc..2e90e678351c4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -168,8 +168,6 @@ import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectBuilder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessFactory; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -180,6 +178,8 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; import org.elasticsearch.xpack.ml.rest.RestFindFileStructureAction; import org.elasticsearch.xpack.ml.rest.RestMlInfoAction; @@ -386,7 +386,7 @@ public Collection createComponents(Client client, ClusterService cluster nativeController, client, 
clusterService); - normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, settings, nativeController); + normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, nativeController); } catch (IOException e) { // This also should not happen in production, as the MachineLearningFeatureSet should have // hit the same error first and brought down the node with a friendlier error message @@ -396,8 +396,7 @@ public Collection createComponents(Client client, ClusterService cluster autodetectProcessFactory = (job, autodetectParams, executorService, onProcessCrash) -> new BlackHoleAutodetectProcess(job.getId()); // factor of 1.0 makes renormalization a no-op - normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> - new MultiplyingNormalizerProcess(settings, 1.0); + normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> new MultiplyingNormalizerProcess(1.0); } NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory, threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index b5ff2e2a7de6e..d9b8ea7cd4226 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -31,8 +31,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.ml.stats.StatsAccumulator; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index b0a0eebc49df3..efc0517900ec4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -10,8 +10,8 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java index 4942200606dba..dbc565fc50c12 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java @@ -19,9 +19,9 @@ import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; -import org.elasticsearch.xpack.ml.job.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.job.process.ProcessBuilderUtils; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.AnalysisLimitsWriter; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.FieldConfigWriter; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.ModelPlotConfigWriter; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index 0206bd88245b3..3f93d46b72737 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -264,7 +264,7 @@ public void forecastJob(ForecastParams params, BiConsumer handl public void persistJob(BiConsumer handler) { submitOperation(() -> { - autodetectProcess.persistJob(); + autodetectProcess.persistState(); return null; }, handler); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java index 21be815d561a8..dab0c5aa49872 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java @@ -10,23 +10,22 @@ import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; +import org.elasticsearch.xpack.ml.process.NativeProcess; -import java.io.Closeable; import java.io.IOException; -import java.time.ZonedDateTime; import java.util.Iterator; import java.util.List; /** * Interface representing the native C++ autodetect process */ -public interface AutodetectProcess extends Closeable { +public interface AutodetectProcess extends NativeProcess { /** * Restore state from the given {@link ModelSnapshot} @@ -35,22 +34,6 @@ public interface AutodetectProcess extends Closeable { */ void restoreState(StateStreamer stateStreamer, ModelSnapshot modelSnapshot); - /** - * Is the process ready to receive data? 
- * @return {@code true} if the process is ready to receive data - */ - boolean isReady(); - - /** - * Write the record to autodetect. The record parameter should not be encoded - * (i.e. length encoded) the implementation will appy the corrrect encoding. - * - * @param record Plain array of strings, implementors of this class should - * encode the record appropriately - * @throws IOException If the write failed - */ - void writeRecord(String[] record) throws IOException; - /** * Write the reset buckets control message * @@ -115,60 +98,8 @@ void writeUpdateDetectorRulesMessage(int detectorIndex, List rule */ void forecastJob(ForecastParams params) throws IOException; - /** - * Ask the job to start persisting model state in the background - * @throws IOException If writing the request fails - */ - void persistJob() throws IOException; - - /** - * Flush the output data stream - */ - void flushStream() throws IOException; - - /** - * Kill the process. Do not wait for it to stop gracefully. - */ - void kill() throws IOException; - /** * @return stream of autodetect results. */ Iterator readAutodetectResults(); - - /** - * The time the process was started - * @return Process start time - */ - ZonedDateTime getProcessStartTime(); - - /** - * Returns true if the process still running. - * Methods such as {@link #flushJob(FlushJobParams)} are essentially - * asynchronous the command will be continue to execute in the process after - * the call has returned. This method tests whether something catastrophic - * occurred in the process during its execution. - * @return True if the process is still running - */ - boolean isProcessAlive(); - - /** - * Check whether autodetect terminated given maximum 45ms for termination - * - * Processing errors are highly likely caused by autodetect being unexpectedly - * terminated. - * - * Workaround: As we can not easily check if autodetect is alive, we rely on - * the logPipe being ended. As the loghandler runs in another thread which - * might fall behind this one, we give it a grace period of 45ms. - * - * @return false if process has ended for sure, true if it probably still runs - */ - boolean isProcessAliveAfterWaiting(); - - /** - * Read any content in the error output buffer. - * @return An error message or empty String if no error. 
- */ - String readError(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index ec6b67da1dca8..8dbc13038c7f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; -import org.elasticsearch.xpack.ml.job.process.NativeStorageProvider; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java index 8ff54e80785c5..e1b69d78894db 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java @@ -96,7 +96,7 @@ public String flushJob(FlushJobParams params) throws IOException { } @Override - public void persistJob() { + public void persistState() { } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java index faae29fd1eb56..112805b2f7414 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java @@ -5,300 +5,116 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; 
import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.ControlMsgToProcessWriter; -import org.elasticsearch.xpack.ml.job.process.autodetect.writer.LengthEncodedWriter; -import org.elasticsearch.xpack.ml.job.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.AbstractNativeProcess; -import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.nio.file.Files; import java.nio.file.Path; -import java.time.Duration; -import java.time.ZonedDateTime; import java.util.Iterator; import java.util.List; -import java.util.Objects; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; /** * Autodetect process using native code. */ -class NativeAutodetectProcess implements AutodetectProcess { - private static final Logger LOGGER = Loggers.getLogger(NativeAutodetectProcess.class); +class NativeAutodetectProcess extends AbstractNativeProcess implements AutodetectProcess { - private static final Duration WAIT_FOR_KILL_TIMEOUT = Duration.ofMillis(1000); + private static final Logger LOGGER = LogManager.getLogger(NativeAutodetectProcess.class); + + private static final String NAME = "autodetect"; - private final String jobId; - private final CppLogMessageHandler cppLogHandler; - private final OutputStream processInStream; - private final InputStream processOutStream; - private final OutputStream processRestoreStream; - private final LengthEncodedWriter recordWriter; - private final ZonedDateTime startTime; - private final int numberOfFields; - private final List filesToDelete; - private final Runnable onProcessCrash; - private volatile Future logTailFuture; - private volatile Future stateProcessorFuture; - private volatile boolean processCloseInitiated; - private volatile boolean processKilled; - private volatile boolean isReady; private final AutodetectResultsParser resultsParser; NativeAutodetectProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream, OutputStream processRestoreStream, int numberOfFields, List filesToDelete, AutodetectResultsParser resultsParser, Runnable onProcessCrash) { - this.jobId = jobId; - cppLogHandler = new CppLogMessageHandler(jobId, logStream); - this.processInStream = new BufferedOutputStream(processInStream); - this.processOutStream = processOutStream; - this.processRestoreStream = processRestoreStream; - this.recordWriter = new LengthEncodedWriter(this.processInStream); - startTime = ZonedDateTime.now(); - this.numberOfFields = numberOfFields; - this.filesToDelete = filesToDelete; + super(jobId, logStream, processInStream, processOutStream, processRestoreStream, numberOfFields, filesToDelete, onProcessCrash); this.resultsParser = resultsParser; - this.onProcessCrash = Objects.requireNonNull(onProcessCrash); } - public void start(ExecutorService executorService, StateProcessor stateProcessor, InputStream persistStream) { - logTailFuture = executorService.submit(() -> { - try (CppLogMessageHandler h = cppLogHandler) { - h.tailStream(); - } catch (IOException e) { - if 
(processKilled == false) { - LOGGER.error(new ParameterizedMessage("[{}] Error tailing autodetect process logs", jobId), e); - } - } finally { - if (processCloseInitiated == false && processKilled == false) { - // The log message doesn't say "crashed", as the process could have been killed - // by a user or other process (e.g. the Linux OOM killer) - - String errors = cppLogHandler.getErrors(); - LOGGER.error("[{}] autodetect process stopped unexpectedly: {}", jobId, errors); - onProcessCrash.run(); - } - } - }); - stateProcessorFuture = executorService.submit(() -> { - try (InputStream in = persistStream) { - stateProcessor.process(jobId, in); - if (processKilled == false) { - LOGGER.info("[{}] State output finished", jobId); - } - } catch (IOException e) { - if (processKilled == false) { - LOGGER.error(new ParameterizedMessage("[{}] Error reading autodetect state output", jobId), e); - } - } - }); + @Override + public String getName() { + return NAME; } @Override public void restoreState(StateStreamer stateStreamer, ModelSnapshot modelSnapshot) { if (modelSnapshot != null) { - try (OutputStream r = processRestoreStream) { - stateStreamer.restoreStateToStream(jobId, modelSnapshot, r); + try (OutputStream r = processRestoreStream()) { + stateStreamer.restoreStateToStream(jobId(), modelSnapshot, r); } catch (Exception e) { // TODO: should we fail to start? - if (processKilled == false) { - LOGGER.error("Error restoring model state for job " + jobId, e); + if (isProcessKilled() == false) { + LOGGER.error("Error restoring model state for job " + jobId(), e); } } } - isReady = true; - } - - @Override - public boolean isReady() { - return isReady; - } - - @Override - public void writeRecord(String[] record) throws IOException { - recordWriter.writeRecord(record); + setReady(); } @Override public void writeResetBucketsControlMessage(DataLoadParams params) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeResetBucketsMessage(params); + newMessageWriter().writeResetBucketsMessage(params); } @Override public void writeUpdateModelPlotMessage(ModelPlotConfig modelPlotConfig) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateModelPlotMessage(modelPlotConfig); + newMessageWriter().writeUpdateModelPlotMessage(modelPlotConfig); } @Override public void writeUpdateDetectorRulesMessage(int detectorIndex, List rules) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateDetectorRulesMessage(detectorIndex, rules); + newMessageWriter().writeUpdateDetectorRulesMessage(detectorIndex, rules); } @Override public void writeUpdateFiltersMessage(List filters) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateFiltersMessage(filters); + newMessageWriter().writeUpdateFiltersMessage(filters); } @Override public void writeUpdateScheduledEventsMessage(List events, TimeValue bucketSpan) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateScheduledEventsMessage(events, bucketSpan); + newMessageWriter().writeUpdateScheduledEventsMessage(events, bucketSpan); } @Override public String flushJob(FlushJobParams params) throws IOException { - ControlMsgToProcessWriter writer = new 
ControlMsgToProcessWriter(recordWriter, numberOfFields); + ControlMsgToProcessWriter writer = newMessageWriter(); writer.writeFlushControlMessage(params); return writer.writeFlushMessage(); } @Override public void forecastJob(ForecastParams params) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeForecastMessage(params); - } - - @Override - public void persistJob() throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeStartBackgroundPersistMessage(); - } - - @Override - public void flushStream() throws IOException { - recordWriter.flush(); - } - - @Override - public void close() throws IOException { - try { - processCloseInitiated = true; - // closing its input causes the process to exit - processInStream.close(); - // wait for the process to exit by waiting for end-of-file on the named pipe connected - // to the state processor - it may take a long time for all the model state to be - // indexed - if (stateProcessorFuture != null) { - stateProcessorFuture.get(MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT.getMinutes(), TimeUnit.MINUTES); - } - // the log processor should have stopped by now too - assume processing the logs will - // take no more than 5 seconds longer than processing the state (usually it should - // finish first) - if (logTailFuture != null) { - logTailFuture.get(5, TimeUnit.SECONDS); - } - - if (cppLogHandler.seenFatalError()) { - throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); - } - LOGGER.debug("[{}] Autodetect process exited", jobId); - } catch (ExecutionException | TimeoutException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running autodetect process", jobId), e); - } catch (InterruptedException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running autodetect process", jobId), e); - Thread.currentThread().interrupt(); - } finally { - deleteAssociatedFiles(); - } + newMessageWriter().writeForecastMessage(params); } @Override - public void kill() throws IOException { - processKilled = true; - try { - // The PID comes via the processes log stream. We don't wait for it to arrive here, - // but if the wait times out it implies the process has only just started, in which - // case it should die very quickly when we close its input stream. 
- NativeControllerHolder.getNativeController().killProcess(cppLogHandler.getPid(Duration.ZERO)); - - // Wait for the process to die before closing processInStream as if the process - // is still alive when processInStream is closed autodetect will start persisting state - cppLogHandler.waitForLogStreamClose(WAIT_FOR_KILL_TIMEOUT); - } catch (TimeoutException e) { - LOGGER.warn("[{}] Failed to get PID of autodetect process to kill", jobId); - } finally { - try { - processInStream.close(); - } catch (IOException e) { - // Ignore it - we're shutting down and the method itself has logged a warning - } - try { - deleteAssociatedFiles(); - } catch (IOException e) { - // Ignore it - we're shutting down and the method itself has logged a warning - } - } - } - - private synchronized void deleteAssociatedFiles() throws IOException { - if (filesToDelete == null) { - return; - } - - for (Path fileToDelete : filesToDelete) { - if (Files.deleteIfExists(fileToDelete)) { - LOGGER.debug("[{}] Deleted file {}", jobId, fileToDelete.toString()); - } else { - LOGGER.warn("[{}] Failed to delete file {}", jobId, fileToDelete.toString()); - } - } - - filesToDelete.clear(); + public void persistState() throws IOException { + newMessageWriter().writeStartBackgroundPersistMessage(); } @Override public Iterator readAutodetectResults() { - return resultsParser.parseResults(processOutStream); + return resultsParser.parseResults(processOutStream()); } - @Override - public ZonedDateTime getProcessStartTime() { - return startTime; - } - - @Override - public boolean isProcessAlive() { - // Sanity check: make sure the process hasn't terminated already - return !cppLogHandler.hasLogStreamEnded(); - } - - @Override - public boolean isProcessAliveAfterWaiting() { - cppLogHandler.waitForLogStreamClose(Duration.ofMillis(45)); - return isProcessAlive(); - } - - @Override - public String readError() { - return cppLogHandler.getErrors(); + private ControlMsgToProcessWriter newMessageWriter() { + return new ControlMsgToProcessWriter(recordWriter(), numberOfFields()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java index 06055476f7642..ea31c5de4dffa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java @@ -16,10 +16,10 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; +import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; @@ -67,7 +67,7 @@ public AutodetectProcess createAutodetectProcess(Job job, // The extra 1 is the control field int 
numberOfFields = job.allInputFields().size() + (includeTokensField ? 1 : 0) + 1; - StateProcessor stateProcessor = new StateProcessor(settings, client); + AutodetectStateProcessor stateProcessor = new AutodetectStateProcessor(client, job.getId()); AutodetectResultsParser resultsParser = new AutodetectResultsParser(settings); NativeAutodetectProcess autodetect = new NativeAutodetectProcess( job.getId(), processPipes.getLogStream().get(), processPipes.getProcessInStream().get(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java similarity index 82% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java index ec62901d65a6e..63a496f0503bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java @@ -5,17 +5,18 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.output; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.ml.process.StateProcessor; import java.io.IOException; import java.io.InputStream; @@ -28,17 +29,22 @@ /** * Reads the autodetect state and persists via a bulk request */ -public class StateProcessor extends AbstractComponent { +public class AutodetectStateProcessor implements StateProcessor { + + private static final Logger LOGGER = LogManager.getLogger(AutodetectStateProcessor.class); private static final int READ_BUF_SIZE = 8192; + private final Client client; + private final String jobId; - public StateProcessor(Settings settings, Client client) { - super(settings); + public AutodetectStateProcessor(Client client, String jobId) { this.client = client; + this.jobId = jobId; } - public void process(String jobId, InputStream in) throws IOException { + @Override + public void process(InputStream in) throws IOException { BytesReference bytesToDate = null; List newBlocks = new ArrayList<>(); byte[] readBuf = new byte[READ_BUF_SIZE]; @@ -56,7 +62,7 @@ public void process(String jobId, InputStream in) throws IOException { } else { BytesReference newBytes = new CompositeBytesReference(newBlocks.toArray(new BytesReference[0])); bytesToDate = (bytesToDate == null) ? newBytes : new CompositeBytesReference(bytesToDate, newBytes); - bytesToDate = splitAndPersist(jobId, bytesToDate, searchFrom); + bytesToDate = splitAndPersist(bytesToDate, searchFrom); searchFrom = (bytesToDate == null) ? 
0 : bytesToDate.length(); newBlocks.clear(); } @@ -69,7 +75,7 @@ public void process(String jobId, InputStream in) throws IOException { * data is expected to be a series of Elasticsearch bulk requests in UTF-8 JSON * (as would be uploaded to the public REST API) separated by zero bytes ('\0'). */ - private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, int searchFrom) throws IOException { + private BytesReference splitAndPersist(BytesReference bytesRef, int searchFrom) throws IOException { int splitFrom = 0; while (true) { int nextZeroByte = findNextZeroByte(bytesRef, searchFrom, splitFrom); @@ -80,7 +86,7 @@ private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, in // Ignore completely empty chunks if (nextZeroByte > splitFrom) { // No validation - assume the native process has formatted the state correctly - persist(jobId, bytesRef.slice(splitFrom, nextZeroByte - splitFrom)); + persist(bytesRef.slice(splitFrom, nextZeroByte - splitFrom)); } splitFrom = nextZeroByte + 1; } @@ -90,11 +96,11 @@ private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, in return bytesRef.slice(splitFrom, bytesRef.length() - splitFrom); } - void persist(String jobId, BytesReference bytes) throws IOException { + void persist(BytesReference bytes) throws IOException { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON); if (bulkRequest.numberOfActions() > 0) { - logger.trace("[{}] Persisting job state document", jobId); + LOGGER.trace("[{}] Persisting job state document", jobId); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { client.bulk(bulkRequest).actionGet(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java index 7961fec449774..dc9d77cd68784 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.supercsv.encoder.CsvEncoder; import org.supercsv.encoder.DefaultCsvEncoder; import org.supercsv.prefs.CsvPreference; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java index 2c026ec15506e..fc98990d8d61f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import 
org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import java.io.IOException; import java.io.OutputStream; @@ -168,7 +169,7 @@ public void writeForecastMessage(ForecastParams params) throws IOException { builder.field("tmp_storage", params.getTmpStorage()); } builder.endObject(); - + writeMessage(FORECAST_MESSAGE_CODE + Strings.toString(builder)); fillCommandBuffer(); lengthEncodedWriter.flush(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java index 2228835bea2a6..57bbb69c5d0de 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.writer; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import org.supercsv.io.CsvListWriter; import org.supercsv.prefs.CsvPreference; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java index 8aa266e15d22e..5d320a1bd715c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java @@ -5,9 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -17,6 +16,7 @@ import java.io.IOException; import java.io.PipedInputStream; import java.io.PipedOutputStream; +import java.time.ZonedDateTime; /** * Normalizer process that doesn't use native code. 
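A minimal sketch of how the refactored test double is driven once the Settings argument is gone, mirroring the NormalizerTests change at the end of this patch. The wrapper class and method names are hypothetical, and handler.process()/getNormalizedResults() are assumed from how NormalizerResultHandler is used elsewhere rather than confirmed by this diff:

import java.io.IOException;
import java.util.List;
import org.elasticsearch.xpack.ml.job.process.normalizer.MultiplyingNormalizerProcess;
import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerResult;
import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler;

class MultiplyingNormalizerProcessSketch {
    static List<NormalizerResult> normalizeOnce(double factor, String[] header, String[] record) throws IOException {
        // One-argument constructor introduced by this patch; Settings is no longer needed.
        MultiplyingNormalizerProcess process = new MultiplyingNormalizerProcess(factor);
        process.writeRecord(header); // the first record is treated as the header and skipped
        process.writeRecord(record); // later records should come back with scores scaled by 'factor'
        process.close();             // closes the XContent builder feeding the piped output
        NormalizerResultHandler handler = process.createNormalizedResultsHandler();
        handler.process();                     // assumed entry point that parses the piped output
        return handler.getNormalizedResults(); // assumed accessor for the parsed results
    }
}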
@@ -27,16 +27,15 @@ * - It can be used to produce results in testing that do not vary based on changes to the real normalization algorithms */ public class MultiplyingNormalizerProcess implements NormalizerProcess { - private static final Logger LOGGER = Loggers.getLogger(MultiplyingNormalizerProcess.class); - private final Settings settings; + private static final Logger LOGGER = LogManager.getLogger(MultiplyingNormalizerProcess.class); + private final double factor; private final PipedInputStream processOutStream; private XContentBuilder builder; private boolean shouldIgnoreHeader; - public MultiplyingNormalizerProcess(Settings settings, double factor) { - this.settings = settings; + public MultiplyingNormalizerProcess(double factor) { this.factor = factor; processOutStream = new PipedInputStream(); try { @@ -49,6 +48,11 @@ public MultiplyingNormalizerProcess(Settings settings, double factor) { shouldIgnoreHeader = true; } + @Override + public boolean isReady() { + return true; + } + @Override public void writeRecord(String[] record) throws IOException { if (shouldIgnoreHeader) { @@ -77,13 +81,33 @@ public void writeRecord(String[] record) throws IOException { } @Override - public void close() throws IOException { + public void persistState() { + // Nothing to do + } + + @Override + public void flushStream() { + // Nothing to do + } + + @Override + public void kill() { + // Nothing to do + } + + @Override + public ZonedDateTime getProcessStartTime() { + return null; + } + + @Override + public void close() { builder.close(); } @Override public NormalizerResultHandler createNormalizedResultsHandler() { - return new NormalizerResultHandler(settings, processOutStream); + return new NormalizerResultHandler(processOutStream); } @Override @@ -92,6 +116,11 @@ public boolean isProcessAlive() { return true; } + @Override + public boolean isProcessAliveAfterWaiting() { + return true; + } + @Override public String readError() { return ""; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java index ee6c7818b38ec..6b67ffa6acb6f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java @@ -5,104 +5,41 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.xpack.ml.job.process.autodetect.writer.LengthEncodedWriter; -import org.elasticsearch.xpack.ml.job.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.AbstractNativeProcess; -import java.io.BufferedOutputStream; -import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import 
java.util.concurrent.TimeoutException; +import java.util.Collections; /** * Normalizer process using native code. */ -class NativeNormalizerProcess implements NormalizerProcess { - private static final Logger LOGGER = Loggers.getLogger(NativeNormalizerProcess.class); +class NativeNormalizerProcess extends AbstractNativeProcess implements NormalizerProcess { - private final String jobId; - private final Settings settings; - private final CppLogMessageHandler cppLogHandler; - private final OutputStream processInStream; - private final InputStream processOutStream; - private final LengthEncodedWriter recordWriter; - private volatile boolean processCloseInitiated; - private Future logTailThread; + private static final String NAME = "normalizer"; - NativeNormalizerProcess(String jobId, Settings settings, InputStream logStream, OutputStream processInStream, - InputStream processOutStream, ExecutorService executorService) throws EsRejectedExecutionException { - this.jobId = jobId; - this.settings = settings; - cppLogHandler = new CppLogMessageHandler(jobId, logStream); - this.processInStream = new BufferedOutputStream(processInStream); - this.processOutStream = processOutStream; - this.recordWriter = new LengthEncodedWriter(this.processInStream); - logTailThread = executorService.submit(() -> { - try (CppLogMessageHandler h = cppLogHandler) { - h.tailStream(); - } catch (IOException e) { - LOGGER.error(new ParameterizedMessage("[{}] Error tailing normalizer process logs", - new Object[] { jobId }), e); - } finally { - if (processCloseInitiated == false) { - // The log message doesn't say "crashed", as the process could have been killed - // by a user or other process (e.g. the Linux OOM killer) - LOGGER.error("[{}] normalizer process stopped unexpectedly", jobId); - } - } - }); + NativeNormalizerProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream) { + super(jobId, logStream, processInStream, processOutStream, null, 0, Collections.emptyList(), () -> {}); } @Override - public void writeRecord(String[] record) throws IOException { - recordWriter.writeRecord(record); + public String getName() { + return NAME; } @Override - public void close() throws IOException { - try { - processCloseInitiated = true; - // closing its input causes the process to exit - processInStream.close(); - // wait for the process to exit by waiting for end-of-file on the named pipe connected to its logger - // this may take a long time as it persists the model state - logTailThread.get(5, TimeUnit.MINUTES); - if (cppLogHandler.seenFatalError()) { - throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); - } - LOGGER.debug("[{}] Normalizer process exited", jobId); - } catch (ExecutionException | TimeoutException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running normalizer process", new Object[] { jobId }), e); - } catch (InterruptedException e) { - LOGGER.warn("[{}] Exception closing the running normalizer process", jobId); - Thread.currentThread().interrupt(); - } + public boolean isReady() { + return true; } @Override - public NormalizerResultHandler createNormalizedResultsHandler() { - return new NormalizerResultHandler(settings, processOutStream); + public void persistState() { + // nothing to persist } @Override - public boolean isProcessAlive() { - // Sanity check: make sure the process hasn't terminated already - return !cppLogHandler.hasLogStreamEnded(); - } - - @Override - public String readError() { - return 
cppLogHandler.getErrors(); + public NormalizerResultHandler createNormalizedResultsHandler() { + return new NormalizerResultHandler(processOutStream()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java index 60f52d3f44288..21f7229aef123 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java @@ -5,13 +5,14 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; import java.io.IOException; @@ -22,17 +23,15 @@ public class NativeNormalizerProcessFactory implements NormalizerProcessFactory { - private static final Logger LOGGER = Loggers.getLogger(NativeNormalizerProcessFactory.class); + private static final Logger LOGGER = LogManager.getLogger(NativeNormalizerProcessFactory.class); private static final NamedPipeHelper NAMED_PIPE_HELPER = new NamedPipeHelper(); private static final Duration PROCESS_STARTUP_TIMEOUT = Duration.ofSeconds(10); private final Environment env; - private final Settings settings; private final NativeController nativeController; - public NativeNormalizerProcessFactory(Environment env, Settings settings, NativeController nativeController) { + public NativeNormalizerProcessFactory(Environment env, NativeController nativeController) { this.env = Objects.requireNonNull(env); - this.settings = Objects.requireNonNull(settings); this.nativeController = Objects.requireNonNull(nativeController); } @@ -43,8 +42,20 @@ public NormalizerProcess createNormalizerProcess(String jobId, String quantilesS true, false, true, true, false, false); createNativeProcess(jobId, quantilesState, processPipes, bucketSpan); - return new NativeNormalizerProcess(jobId, settings, processPipes.getLogStream().get(), - processPipes.getProcessInStream().get(), processPipes.getProcessOutStream().get(), executorService); + NativeNormalizerProcess normalizerProcess = new NativeNormalizerProcess(jobId, processPipes.getLogStream().get(), + processPipes.getProcessInStream().get(), processPipes.getProcessOutStream().get()); + + try { + normalizerProcess.start(executorService); + return normalizerProcess; + } catch (EsRejectedExecutionException e) { + try { + IOUtils.close(normalizerProcess); + } catch (IOException ioe) { + LOGGER.error("Can't close normalizer", ioe); + } + throw e; + } } private void createNativeProcess(String jobId, String quantilesState, ProcessPipes processPipes, Integer bucketSpan) { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java index d0ce62612bb69..230048c5b4d2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java @@ -6,40 +6,16 @@ package org.elasticsearch.xpack.ml.job.process.normalizer; import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler; - -import java.io.Closeable; -import java.io.IOException; +import org.elasticsearch.xpack.ml.process.NativeProcess; /** * Interface representing the native C++ normalizer process */ -public interface NormalizerProcess extends Closeable { - - /** - * Write the record to normalizer. The record parameter should not be encoded - * (i.e. length encoded) the implementation will appy the corrrect encoding. - * - * @param record Plain array of strings, implementors of this class should - * encode the record appropriately - * @throws IOException If the write failed - */ - void writeRecord(String[] record) throws IOException; +public interface NormalizerProcess extends NativeProcess { /** * Create a result handler for this process's results. * @return results handler */ NormalizerResultHandler createNormalizedResultsHandler(); - - /** - * Returns true if the process still running. - * @return True if the process is still running - */ - boolean isProcessAlive(); - - /** - * Read any content in the error output buffer. - * @return An error message or empty String if no error. - */ - String readError(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java index dcadef7a24b53..3b65a739e82a9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java @@ -8,8 +8,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -26,15 +24,14 @@ /** * Reads normalizer output. 
*/ -public class NormalizerResultHandler extends AbstractComponent { +public class NormalizerResultHandler { private static final int READ_BUF_SIZE = 1024; private final InputStream inputStream; private final List normalizedResults; - public NormalizerResultHandler(Settings settings, InputStream inputStream) { - super(settings); + public NormalizerResultHandler(InputStream inputStream) { this.inputStream = inputStream; normalizedResults = new ArrayList<>(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java new file mode 100644 index 0000000000000..b84bfdd38e19a --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.logging.CppLogMessageHandler; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Abstract class for implementing a native process. 
+ */ +public abstract class AbstractNativeProcess implements NativeProcess { + + private static final Logger LOGGER = LogManager.getLogger(AbstractNativeProcess.class); + + private static final Duration WAIT_FOR_KILL_TIMEOUT = Duration.ofMillis(1000); + + private final String jobId; + private final CppLogMessageHandler cppLogHandler; + private final OutputStream processInStream; + private final InputStream processOutStream; + private final OutputStream processRestoreStream; + private final LengthEncodedWriter recordWriter; + private final ZonedDateTime startTime; + private final int numberOfFields; + private final List filesToDelete; + private final Runnable onProcessCrash; + private volatile Future logTailFuture; + private volatile Future stateProcessorFuture; + private volatile boolean processCloseInitiated; + private volatile boolean processKilled; + private volatile boolean isReady; + + protected AbstractNativeProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream, + OutputStream processRestoreStream, int numberOfFields, List filesToDelete, + Runnable onProcessCrash) { + this.jobId = jobId; + cppLogHandler = new CppLogMessageHandler(jobId, logStream); + this.processInStream = new BufferedOutputStream(processInStream); + this.processOutStream = processOutStream; + this.processRestoreStream = processRestoreStream; + this.recordWriter = new LengthEncodedWriter(this.processInStream); + startTime = ZonedDateTime.now(); + this.numberOfFields = numberOfFields; + this.filesToDelete = filesToDelete; + this.onProcessCrash = Objects.requireNonNull(onProcessCrash); + } + + public abstract String getName(); + + /** + * Starts a process that does not persist any state + * @param executorService the executor service to run on + */ + public void start(ExecutorService executorService) { + logTailFuture = executorService.submit(() -> { + try (CppLogMessageHandler h = cppLogHandler) { + h.tailStream(); + } catch (IOException e) { + if (processKilled == false) { + LOGGER.error(new ParameterizedMessage("[{}] Error tailing {} process logs", jobId, getName()), e); + } + } finally { + if (processCloseInitiated == false && processKilled == false) { + // The log message doesn't say "crashed", as the process could have been killed + // by a user or other process (e.g. 
the Linux OOM killer) + + String errors = cppLogHandler.getErrors(); + LOGGER.error("[{}] {} process stopped unexpectedly: {}", jobId, getName(), errors); + onProcessCrash.run(); + } + } + }); + } + + /** + * Starts a process that may persist its state + * @param executorService the executor service to run on + * @param stateProcessor the state processor + * @param persistStream the stream where the state is persisted + */ + public void start(ExecutorService executorService, StateProcessor stateProcessor, InputStream persistStream) { + start(executorService); + + stateProcessorFuture = executorService.submit(() -> { + try (InputStream in = persistStream) { + stateProcessor.process(in); + if (processKilled == false) { + LOGGER.info("[{}] State output finished", jobId); + } + } catch (IOException e) { + if (processKilled == false) { + LOGGER.error(new ParameterizedMessage("[{}] Error reading {} state output", jobId, getName()), e); + } + } + }); + } + + @Override + public boolean isReady() { + return isReady; + } + + protected void setReady() { + isReady = true; + } + + @Override + public void writeRecord(String[] record) throws IOException { + recordWriter.writeRecord(record); + } + + @Override + public void flushStream() throws IOException { + recordWriter.flush(); + } + + @Override + public void close() throws IOException { + try { + processCloseInitiated = true; + // closing its input causes the process to exit + processInStream.close(); + // wait for the process to exit by waiting for end-of-file on the named pipe connected + // to the state processor - it may take a long time for all the model state to be + // indexed + if (stateProcessorFuture != null) { + stateProcessorFuture.get(MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT.getMinutes(), TimeUnit.MINUTES); + } + // the log processor should have stopped by now too - assume processing the logs will + // take no more than 5 seconds longer than processing the state (usually it should + // finish first) + if (logTailFuture != null) { + logTailFuture.get(5, TimeUnit.SECONDS); + } + + if (cppLogHandler.seenFatalError()) { + throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); + } + LOGGER.debug("[{}] {} process exited", jobId, getName()); + } catch (ExecutionException | TimeoutException e) { + LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running {} process", jobId, getName()), e); + } catch (InterruptedException e) { + LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running {} process", jobId, getName()), e); + Thread.currentThread().interrupt(); + } finally { + deleteAssociatedFiles(); + } + } + + @Override + public void kill() throws IOException { + processKilled = true; + try { + // The PID comes via the process's log stream. We don't wait for it to arrive here, + // but if the wait times out it implies the process has only just started, in which + // case it should die very quickly when we close its input stream.
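// A minimal driver sketch of the lifecycle AbstractNativeProcess centralizes,
// using only methods added in this file; the sequence itself is illustrative,
// and kill() below is the forceful alternative to close().
//
// import java.io.IOException;
// import java.io.InputStream;
// import java.util.concurrent.ExecutorService;
//
// static void runToCompletion(AbstractNativeProcess process, ExecutorService executorService,
//                             StateProcessor stateProcessor, InputStream persistStream,
//                             String[] fields) throws IOException {
//     process.start(executorService, stateProcessor, persistStream); // tail logs and read persisted state
//     process.writeRecord(fields);  // length-encoded through the shared recordWriter()
//     process.flushStream();
//     process.close();              // graceful: waits for the state and log streams to drain
// }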
+ NativeControllerHolder.getNativeController().killProcess(cppLogHandler.getPid(Duration.ZERO)); + + // Wait for the process to die before closing processInStream as if the process + // is still alive when processInStream is closed it may start persisting state + cppLogHandler.waitForLogStreamClose(WAIT_FOR_KILL_TIMEOUT); + } catch (TimeoutException e) { + LOGGER.warn("[{}] Failed to get PID of {} process to kill", jobId, getName()); + } finally { + try { + processInStream.close(); + } catch (IOException e) { + // Ignore it - we're shutting down and the method itself has logged a warning + } + try { + deleteAssociatedFiles(); + } catch (IOException e) { + // Ignore it - we're shutting down and the method itself has logged a warning + } + } + } + + private synchronized void deleteAssociatedFiles() throws IOException { + if (filesToDelete == null) { + return; + } + + for (Path fileToDelete : filesToDelete) { + if (Files.deleteIfExists(fileToDelete)) { + LOGGER.debug("[{}] Deleted file {}", jobId, fileToDelete.toString()); + } else { + LOGGER.warn("[{}] Failed to delete file {}", jobId, fileToDelete.toString()); + } + } + + filesToDelete.clear(); + } + + @Override + public ZonedDateTime getProcessStartTime() { + return startTime; + } + + @Override + public boolean isProcessAlive() { + // Sanity check: make sure the process hasn't terminated already + return !cppLogHandler.hasLogStreamEnded(); + } + + @Override + public boolean isProcessAliveAfterWaiting() { + cppLogHandler.waitForLogStreamClose(Duration.ofMillis(45)); + return isProcessAlive(); + } + + @Override + public String readError() { + return cppLogHandler.getErrors(); + } + + protected String jobId() { + return jobId; + } + + protected InputStream processOutStream() { + return processOutStream; + } + + @Nullable + protected OutputStream processRestoreStream() { + return processRestoreStream; + } + + protected int numberOfFields() { + return numberOfFields; + } + + protected LengthEncodedWriter recordWriter() { + return recordWriter; + } + + protected boolean isProcessKilled() { + return processKilled; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java index 0b9cb833c8980..747074028953c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java @@ -3,13 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.ml.MachineLearningField; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java new file mode 100644 index 0000000000000..c4f2b4a463185 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import java.io.Closeable; +import java.io.IOException; +import java.time.ZonedDateTime; + +/** + * Interface representing a native C++ process + */ +public interface NativeProcess extends Closeable { + + /** + * Is the process ready to receive data? + * @return {@code true} if the process is ready to receive data + */ + boolean isReady(); + + /** + * Write the record to the process. The record parameter should not be encoded + * (i.e. length encoded); the implementation will apply the correct encoding. + * + * @param record Plain array of strings, implementors of this class should + * encode the record appropriately + * @throws IOException If the write failed + */ + void writeRecord(String[] record) throws IOException; + + /** + * Ask the process to persist its state in the background + * @throws IOException If writing the request fails + */ + void persistState() throws IOException; + + /** + * Flush the output data stream + */ + void flushStream() throws IOException; + + /** + * Kill the process. Do not wait for it to stop gracefully. + */ + void kill() throws IOException; + + /** + * The time the process was started + * @return Process start time + */ + ZonedDateTime getProcessStartTime(); + + /** + * Returns true if the process is still running.
+ * Methods instructing the process are essentially + * asynchronous; the command will continue to execute in the process after + * the call has returned. + * This method tests whether something catastrophic + * occurred in the process during its execution. + * @return True if the process is still running + */ + boolean isProcessAlive(); + + /** + * Check whether the process terminated given a grace period. + * + * Processing errors are most likely caused by the process being unexpectedly + * terminated. + * + * Workaround: As we cannot easily check if the process is alive, we rely on + * the logPipe being ended. As the log handler runs in another thread which + * might fall behind this one, we give it a grace period. + * + * @return false if the process has ended for sure, true if it probably still runs + */ + boolean isProcessAliveAfterWaiting(); + + /** + * Read any content in the error output buffer. + * @return An error message or empty String if no error. + */ + String readError(); +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java index 8a0268a8d0793..9670fadfefff3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java index 41a7df348b103..4d468f80176f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.Strings; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java new file mode 100644 index 0000000000000..e3937d7199131 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ml.process; + +import java.io.IOException; +import java.io.InputStream; + +public interface StateProcessor { + + void process(InputStream in) throws IOException; +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java index 6064cfef31b18..c3310b6b1b5b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java index af0f199dd0c58..341b9ae371b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java similarity index 95% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java index 34f9d8dc469fc..e82c963b5ed6c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.autodetect.writer; +package org.elasticsearch.xpack.ml.process.writer; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.io.IOException; import java.io.OutputStream; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java index 325ad52864bfa..9ef56d927f553 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java @@ -15,8 +15,8 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.junit.Before; import java.nio.file.Path; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java index 93e79c8b97078..6d5adeb3fdbf1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; +import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; @@ -56,7 +56,7 @@ public void testProcessStartTime() throws Exception { mock(OutputStream.class), mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, null, new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); ZonedDateTime startTime = process.getProcessStartTime(); Thread.sleep(500); @@ -76,7 +76,7 @@ public void testWriteRecord() throws IOException { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); process.writeRecord(record); process.flushStream(); @@ -108,7 
+108,7 @@ public void testFlush() throws IOException { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); FlushJobParams params = FlushJobParams.builder().build(); process.flushJob(params); @@ -128,7 +128,7 @@ public void testWriteUpdateConfigMessage() throws IOException { } public void testPersistJob() throws IOException { - testWriteMessage(p -> p.persistJob(), ControlMsgToProcessWriter.BACKGROUND_PERSIST_MESSAGE_CODE); + testWriteMessage(p -> p.persistState(), ControlMsgToProcessWriter.BACKGROUND_PERSIST_MESSAGE_CODE); } public void testWriteMessage(CheckedConsumer writeFunction, String expectedMessageCode) throws IOException { @@ -138,7 +138,7 @@ public void testWriteMessage(CheckedConsumer writeFunct try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); writeFunction.accept(process); process.writeUpdateModelPlotMessage(new ModelPlotConfig()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java similarity index 88% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java index 31b96d8393d12..e4fb5a7f07456 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java @@ -26,7 +26,6 @@ import java.util.List; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -37,7 +36,7 @@ /** * Tests for reading state from the native process. 
*/ -public class StateProcessorTests extends ESTestCase { +public class AutodetectStateProcessorTests extends ESTestCase { private static final String STATE_SAMPLE = "" + "{\"index\": {\"_index\": \"test\", \"_type\": \"type1\", \"_id\": \"1\"}}\n" @@ -50,18 +49,20 @@ public class StateProcessorTests extends ESTestCase { + "{ \"field\" : \"value3\" }\n" + "\0"; + private static final String JOB_ID = "state-processor-test-job"; + private static final int NUM_LARGE_DOCS = 2; private static final int LARGE_DOC_SIZE = 1000000; private Client client; - private StateProcessor stateProcessor; + private AutodetectStateProcessor stateProcessor; @Before public void initialize() throws IOException { client = mock(Client.class); @SuppressWarnings("unchecked") ActionFuture bulkResponseFuture = mock(ActionFuture.class); - stateProcessor = spy(new StateProcessor(Settings.EMPTY, client)); + stateProcessor = spy(new AutodetectStateProcessor(client, JOB_ID)); when(client.bulk(any(BulkRequest.class))).thenReturn(bulkResponseFuture); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); @@ -75,9 +76,9 @@ public void verifyNoMoreClientInteractions() { public void testStateRead() throws IOException { ByteArrayInputStream stream = new ByteArrayInputStream(STATE_SAMPLE.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); ArgumentCaptor bytesRefCaptor = ArgumentCaptor.forClass(BytesReference.class); - verify(stateProcessor, times(3)).persist(eq("_id"), bytesRefCaptor.capture()); + verify(stateProcessor, times(3)).persist(bytesRefCaptor.capture()); String[] threeStates = STATE_SAMPLE.split("\0"); List capturedBytes = bytesRefCaptor.getAllValues(); @@ -92,9 +93,9 @@ public void testStateReadGivenConsecutiveZeroBytes() throws IOException { String zeroBytes = "\0\0\0\0\0\0"; ByteArrayInputStream stream = new ByteArrayInputStream(zeroBytes.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); - verify(stateProcessor, never()).persist(eq("_id"), any()); + verify(stateProcessor, never()).persist(any()); Mockito.verifyNoMoreInteractions(client); } @@ -102,9 +103,9 @@ public void testStateReadGivenConsecutiveSpacesFollowedByZeroByte() throws IOExc String zeroBytes = " \n\0"; ByteArrayInputStream stream = new ByteArrayInputStream(zeroBytes.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); - verify(stateProcessor, times(1)).persist(eq("_id"), any()); + verify(stateProcessor, times(1)).persist(any()); Mockito.verifyNoMoreInteractions(client); } @@ -125,8 +126,8 @@ public void testLargeStateRead() throws Exception { } ByteArrayInputStream stream = new ByteArrayInputStream(builder.toString().getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); - verify(stateProcessor, times(NUM_LARGE_DOCS)).persist(eq("_id"), any()); + stateProcessor.process(stream); + verify(stateProcessor, times(NUM_LARGE_DOCS)).persist(any()); verify(client, times(NUM_LARGE_DOCS)).bulk(any(BulkRequest.class)); verify(client, times(NUM_LARGE_DOCS)).threadPool(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java index 38bef42f800cf..01bdd6a999f26 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.AbstractDataToProcessWriter.InputOutputMap; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.junit.Before; import org.mockito.Mockito; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java index 3d08f5a1c25fb..57554227e9ad3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.InOrder; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java index 661eeca98db8f..04ea8f2c70ef3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; @@ -32,7 +31,7 @@ public class NormalizerTests extends ESTestCase { private static final double INITIAL_SCORE = 3.0; private static final double FACTOR = 2.0; - private Bucket generateBucket(Date timestamp) throws IOException { + private Bucket generateBucket(Date timestamp) { return new Bucket(JOB_ID, timestamp, BUCKET_SPAN); } @@ -49,8 +48,8 @@ public void testNormalize() throws IOException, InterruptedException { ExecutorService threadpool = Executors.newScheduledThreadPool(1); try { NormalizerProcessFactory processFactory = mock(NormalizerProcessFactory.class); - when(processFactory.createNormalizerProcess(eq(JOB_ID), eq(QUANTILES_STATE), eq(BUCKET_SPAN), - any())).thenReturn(new MultiplyingNormalizerProcess(Settings.EMPTY, FACTOR)); + when(processFactory.createNormalizerProcess(eq(JOB_ID), eq(QUANTILES_STATE), eq(BUCKET_SPAN), any())) + .thenReturn(new MultiplyingNormalizerProcess(FACTOR)); Normalizer normalizer = new Normalizer(JOB_ID, processFactory, threadpool); Bucket bucket = generateBucket(new Date(0)); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java index 9e6a4afc4e318..cc0234df39ed7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer.output; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerResult; @@ -32,7 +31,7 @@ public void testParse() throws IOException { + "\"value_field_name\":\"x\",\"probability\":0.03,\"normalized_score\":22.22}\n"; InputStream is = new ByteArrayInputStream(testData.getBytes(StandardCharsets.UTF_8)); - NormalizerResultHandler handler = new NormalizerResultHandler(Settings.EMPTY, is); + NormalizerResultHandler handler = new NormalizerResultHandler(is); handler.process(); List results = handler.getNormalizedResults(); assertEquals(3, results.size()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java index 08c73cdd9c7e9..ac00e8a24e1cf 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java index 3103e76c82bde..fd87e29387e0b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -21,12 +21,11 @@ import java.util.HashMap; import java.util.Map; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.any; - -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class NativeStorageProviderTests extends ESTestCase { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java index 708d7af152014..fa703e778c49d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index af2691d6f3575..d490d58c3ab52 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
 */
-package org.elasticsearch.xpack.ml.job.process.logging;
+package org.elasticsearch.xpack.ml.process.logging;
 
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.Logger;
@@ -203,7 +203,7 @@ public void testParseFatalError() throws IOException, IllegalAccessException {
         }
     }
 
-    private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId)
+    private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId)
             throws IOException {
         Logger cppMessageLogger = Loggers.getLogger(CppLogMessageHandler.class);
         Loggers.addAppender(cppMessageLogger, mockAppender);
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java
similarity index 98%
rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java
rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java
index d3145bb9f6c6b..c6a0bdf151a48 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.ml.job.process.logging;
+package org.elasticsearch.xpack.ml.process.logging;
 
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
@@ -72,4 +72,4 @@ protected Reader instanceReader() {
     protected CppLogMessage doParseInstance(XContentParser parser) {
         return CppLogMessage.PARSER.apply(parser, null);
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java
similarity index 99%
rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java
rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java
index 36f8c8f003050..0e9aff1fb2caf 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java
@@ -3,7 +3,7 @@
  * or more contributor license agreements. Licensed under the Elastic License;
  * you may not use this file except in compliance with the Elastic License.
  */
-package org.elasticsearch.xpack.ml.job.process.autodetect.writer;
+package org.elasticsearch.xpack.ml.process.writer;
 
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Assert;

From af28d1f64869d537aa3db9f3025d8a0a60d783e8 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Fri, 26 Oct 2018 08:47:39 -0600
Subject: [PATCH 14/14] Fix line length for org.elasticsearch.common.* files
 (#34888)

This removes the checkstyle suppressions for things in the `common`
package.
Relates to #34884 --- .../resources/checkstyle_suppressions.xml | 34 ----- .../org/elasticsearch/common/Numbers.java | 9 +- .../common/blobstore/fs/FsBlobStore.java | 3 +- .../common/bytes/BytesArray.java | 3 +- .../common/bytes/PagedBytesReference.java | 3 +- .../org/elasticsearch/common/cache/Cache.java | 3 +- .../common/collect/ImmutableOpenIntMap.java | 4 +- .../DefaultConstructionProxyFactory.java | 3 +- .../inject/internal/ConstructionContext.java | 3 +- .../inject/multibindings/MapBinder.java | 6 +- .../common/inject/spi/InjectionPoint.java | 3 +- .../org/elasticsearch/common/io/Channels.java | 15 ++- .../org/elasticsearch/common/joda/Joda.java | 24 ++-- .../common/lucene/search/XMoreLikeThis.java | 3 +- .../elasticsearch/common/network/Cidrs.java | 6 +- .../common/network/NetworkService.java | 9 +- .../common/recycler/Recyclers.java | 3 +- .../elasticsearch/common/util/BigArrays.java | 24 ++-- .../common/util/CancellableThreads.java | 3 +- .../common/util/CollectionUtils.java | 3 +- .../common/util/concurrent/EsExecutors.java | 15 ++- .../common/util/concurrent/ThreadBarrier.java | 24 ++-- .../common/util/concurrent/ThreadContext.java | 23 ++-- .../common/xcontent/XContentHelper.java | 12 +- .../common/geo/ShapeBuilderTests.java | 3 +- .../common/hash/MessageDigestsTests.java | 24 ++-- .../common/network/CidrsTests.java | 3 +- .../common/unit/DistanceUnitTests.java | 6 +- .../common/unit/FuzzinessTests.java | 3 +- .../common/util/LongObjectHashMapTests.java | 3 +- .../util/concurrent/EsExecutorsTests.java | 6 +- .../concurrent/PrioritizedExecutorsTests.java | 3 +- .../builder/XContentBuilderTests.java | 6 +- .../FilterPathGeneratorFilteringTests.java | 117 +++++++++++------- 34 files changed, 242 insertions(+), 170 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 1297b305ea0c4..267488e97a04f 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -164,29 +164,6 @@ - - - - - - - - - - - - - - - - - - - - - - - @@ -407,17 +384,6 @@ - - - - - - - - - - - diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java index 2c4d700c92ce3..7561175f3fe35 100644 --- a/server/src/main/java/org/elasticsearch/common/Numbers.java +++ b/server/src/main/java/org/elasticsearch/common/Numbers.java @@ -61,7 +61,8 @@ public static int bytesToInt(byte[] arr) { } public static int bytesToInt(BytesRef bytes) { - return (bytes.bytes[bytes.offset] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); + return (bytes.bytes[bytes.offset] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | + ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); } /** @@ -77,8 +78,10 @@ public static long bytesToLong(byte[] arr) { } public static long bytesToLong(BytesRef bytes) { - int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); - int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); + int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | + 
((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); + int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | + ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); return (((long) high) << 32) | (low & 0x0ffffffffL); } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 29f3b2f7e15fa..c49143edb446e 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -48,7 +48,8 @@ public FsBlobStore(Settings settings, Path path) throws IOException { if (!this.readOnly) { Files.createDirectories(path); } - this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); + this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", + new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 9b78c2fe5a788..de21acc487df5 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -68,7 +68,8 @@ public int length() { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > this.length) { - throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new BytesArray(bytes, offset + from, length); } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index b336acfba2008..f6dcdfccca01a 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -63,7 +63,8 @@ public int length() { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > length()) { - throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + length() + + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new PagedBytesReference(bigarrays, byteArray, offset + from, length); } diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java index beb2819f2e6dc..67061a1533475 100644 --- a/server/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/server/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -485,7 +485,8 @@ private void put(K key, V value, long now) { promote(tuple.v1(), now); } if (replaced) { - removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED)); + removalListener.onRemoval(new 
RemovalNotification<>(tuple.v2().key, tuple.v2().value, + RemovalNotification.RemovalReason.REPLACED)); } } diff --git a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java index 43e3552909b36..cb4457ce24b9b 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java @@ -39,8 +39,8 @@ /** * An immutable map implementation based on open hash map. *

- * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenIntMap)} (which is an optimized - * option to copy over existing content and modify it). + * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenIntMap)} + * (which is an optimized option to copy over existing content and modify it). */ public final class ImmutableOpenIntMap implements Iterable> { diff --git a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java index 49ada56cefa6b..36c55d0cb932a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java +++ b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java @@ -50,7 +50,8 @@ public T newInstance(Object... arguments) throws InvocationTargetException { } catch (InstantiationException e) { throw new AssertionError(e); // shouldn't happen, we know this is a concrete type } catch (IllegalAccessException e) { - throw new AssertionError("Wrong access modifiers on " + constructor, e); // a security manager is blocking us, we're hosed + // a security manager is blocking us, we're hosed + throw new AssertionError("Wrong access modifiers on " + constructor, e); } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java index 34c9faf77e770..0813f1f51b34d 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java @@ -79,7 +79,8 @@ public Object createProxy(Errors errors, Class expectedType) throws ErrorsExc // ES: Replace, since we don't use bytecode gen, just get the type class loader, or system if its null //ClassLoader classLoader = BytecodeGen.getClassLoader(expectedType); - ClassLoader classLoader = expectedType.getClassLoader() == null ? ClassLoader.getSystemClassLoader() : expectedType.getClassLoader(); + ClassLoader classLoader = expectedType.getClassLoader() == null ? 
+ ClassLoader.getSystemClassLoader() : expectedType.getClassLoader(); return expectedType.cast(Proxy.newProxyInstance(classLoader, new Class[]{expectedType}, invocationHandler)); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java b/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java index a9a1bb173b797..a0a22d96f58d5 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java @@ -275,7 +275,8 @@ public static class MapBinderProviderWithDependencies implements ProviderWi private final Provider>>> provider; @SuppressWarnings("rawtypes") // code is silly stupid with generics - MapBinderProviderWithDependencies(RealMapBinder binder, Set> dependencies, Provider>>> provider) { + MapBinderProviderWithDependencies(RealMapBinder binder, Set> dependencies, + Provider>>> provider) { this.binder = binder; this.dependencies = dependencies; this.provider = provider; @@ -315,7 +316,8 @@ public void configure(Binder binder) { // binds a Map> from a collection of Map> final Provider>>> entrySetProvider = binder .getProvider(entrySetBinder.getSetKey()); - binder.bind(providerMapKey).toProvider(new MapBinderProviderWithDependencies(RealMapBinder.this, dependencies, entrySetProvider)); + binder.bind(providerMapKey) + .toProvider(new MapBinderProviderWithDependencies(RealMapBinder.this, dependencies, entrySetProvider)); final Provider>> mapProvider = binder.getProvider(providerMapKey); binder.bind(mapKey).toProvider(new ProviderWithDependencies>() { diff --git a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 333938843c13e..07ef3162300d1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -345,7 +345,8 @@ private static void checkForMisplacedBindingAnnotations(Member member, Errors er } private static void addInjectionPoints(TypeLiteral type, - Factory factory, boolean statics, Collection injectionPoints, + Factory factory, boolean statics, + Collection injectionPoints, Errors errors) { if (type.getType() == Object.class) { return; diff --git a/server/src/main/java/org/elasticsearch/common/io/Channels.java b/server/src/main/java/org/elasticsearch/common/io/Channels.java index cb8ac062fbcb2..1d76be43ca981 100644 --- a/server/src/main/java/org/elasticsearch/common/io/Channels.java +++ b/server/src/main/java/org/elasticsearch/common/io/Channels.java @@ -62,7 +62,8 @@ public static byte[] readFromFileChannel(FileChannel channel, long position, int * @param destOffset offset in dest to read into * @param length number of bytes to read */ - public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, byte[] dest, int destOffset, int length) throws IOException { + public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, + byte[] dest, int destOffset, int length) throws IOException { int read = readFromFileChannel(channel, channelPosition, dest, destOffset, length); if (read < 0) { throw new EOFException("read past EOF. 
pos [" + channelPosition + "] length: [" + length + "] end: [" + channel.size() + "]"); @@ -80,7 +81,8 @@ public static void readFromFileChannelWithEofException(FileChannel channel, long * @return total bytes read or -1 if an attempt was made to read past EOF. The method always tries to read all the bytes * that will fit in the destination byte buffer. */ - public static int readFromFileChannel(FileChannel channel, long channelPosition, byte[] dest, int destOffset, int length) throws IOException { + public static int readFromFileChannel(FileChannel channel, long channelPosition, byte[] dest, + int destOffset, int length) throws IOException { ByteBuffer buffer = ByteBuffer.wrap(dest, destOffset, length); return readFromFileChannel(channel, channelPosition, buffer); } @@ -97,7 +99,8 @@ public static int readFromFileChannel(FileChannel channel, long channelPosition, public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, ByteBuffer dest) throws IOException { int read = readFromFileChannel(channel, channelPosition, dest); if (read < 0) { - throw new EOFException("read past EOF. pos [" + channelPosition + "] length: [" + dest.limit() + "] end: [" + channel.size() + "]"); + throw new EOFException("read past EOF. pos [" + channelPosition + + "] length: [" + dest.limit() + "] end: [" + channel.size() + "]"); } } @@ -135,7 +138,8 @@ public static int readFromFileChannel(FileChannel channel, long channelPosition, dest.position(tmpBuffer.position()); } - assert bytesRead == bytesToRead : "failed to read an entire buffer but also didn't get an EOF (read [" + bytesRead + "] needed [" + bytesToRead + "]"; + assert bytesRead == bytesToRead : "failed to read an entire buffer but also didn't get an EOF (read [" + + bytesRead + "] needed [" + bytesToRead + "]"; return bytesRead; } } @@ -149,7 +153,8 @@ private static int readSingleChunk(FileChannel channel, long channelPosition, By return read; } - assert read > 0 : "FileChannel.read with non zero-length bb.remaining() must always read at least one byte (FileChannel is in blocking mode, see spec of ReadableByteChannel)"; + assert read > 0 : "FileChannel.read with non zero-length bb.remaining() must always read at least one byte " + + "(FileChannel is in blocking mode, see spec of ReadableByteChannel)"; bytesRead += read; channelPosition += read; diff --git a/server/src/main/java/org/elasticsearch/common/joda/Joda.java b/server/src/main/java/org/elasticsearch/common/joda/Joda.java index 35ae6e2341f8d..9b3e5974fb6ca 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/server/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -154,9 +154,11 @@ public static FormatDateTimeFormatter forPattern(String input, Locale locale) { } else if ("yearMonthDay".equals(input) || "year_month_day".equals(input)) { formatter = ISODateTimeFormat.yearMonthDay(); } else if ("epoch_second".equals(input)) { - formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(false), new EpochTimeParser(false)).toFormatter(); + formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(false), + new EpochTimeParser(false)).toFormatter(); } else if ("epoch_millis".equals(input)) { - formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(true), new EpochTimeParser(true)).toFormatter(); + formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(true), + new EpochTimeParser(true)).toFormatter(); // strict date formats here, must be at least 4 digits for year and two 
for months and two for day } else if ("strictBasicWeekDate".equals(input) || "strict_basic_week_date".equals(input)) { formatter = StrictISODateTimeFormat.basicWeekDate(); @@ -245,7 +247,8 @@ public static FormatDateTimeFormatter forPattern(String input, Locale locale) { parsers[i] = currentParser.getParser(); } - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder() + .append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers); formatter = builder.toFormatter(); } } else { @@ -286,9 +289,11 @@ public static FormatDateTimeFormatter getStrictStandardDateFormatter() { .toFormatter() .withZoneUTC(); - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)}); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), + new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)}); - return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT); + return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", + builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT); } @@ -312,7 +317,8 @@ public DurationFieldType getRangeDurationType() { @Override public DateTimeField getField(Chronology chronology) { - return new OffsetDateTimeField(new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1); + return new OffsetDateTimeField( + new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1); } }; @@ -393,7 +399,8 @@ public void printTo(StringBuffer buf, long instant, Chronology chrono, int displ * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone. 
*/ @Override - public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) throws IOException { + public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, + DateTimeZone displayZone, Locale locale) throws IOException { if (hasMilliSecondPrecision) { out.write(String.valueOf(instant - displayOffset)); } else { @@ -427,7 +434,8 @@ private long getDateTimeMillis(ReadablePartial partial) { int minuteOfHour = partial.get(DateTimeFieldType.minuteOfHour()); int secondOfMinute = partial.get(DateTimeFieldType.secondOfMinute()); int millisOfSecond = partial.get(DateTimeFieldType.millisOfSecond()); - return partial.getChronology().getDateTimeMillis(year, monthOfYear, dayOfMonth, hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond); + return partial.getChronology().getDateTimeMillis(year, monthOfYear, dayOfMonth, + hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond); } } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index f931ee2dc31a7..1920db12117d4 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -157,7 +157,8 @@ public final class XMoreLikeThis { // static { -// assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_4_9: "Remove this class once we upgrade to Lucene 5.0"; +// assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_4_9: +// "Remove this class once we upgrade to Lucene 5.0"; // } /** diff --git a/server/src/main/java/org/elasticsearch/common/network/Cidrs.java b/server/src/main/java/org/elasticsearch/common/network/Cidrs.java index 1bdd7bf562b93..bdf2257e90298 100644 --- a/server/src/main/java/org/elasticsearch/common/network/Cidrs.java +++ b/server/src/main/java/org/elasticsearch/common/network/Cidrs.java @@ -40,13 +40,15 @@ public static long[] cidrMaskToMinMax(String cidr) { String[] fields = cidr.split("/"); if (fields.length != 2) { throw new IllegalArgumentException( - String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) + String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] but was [%s] after splitting on \"/\" in [%s]", + Arrays.toString(fields), cidr) ); } // do not try to parse IPv4-mapped IPv6 address if (fields[0].contains(":")) { throw new IllegalArgumentException( - String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] where a, b, c, d are decimal octets but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) + String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] where a, b, c, d are decimal octets " + + "but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) ); } byte[] addressBytes; diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java index 7dab3e5256682..de4aee289d336 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -113,7 +113,8 @@ public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOExcep } // check if its a wildcard address: this is 
only ok if its the only address! if (address.isAnyLocalAddress() && addresses.length > 1) { - throw new IllegalArgumentException("bind address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense"); + throw new IllegalArgumentException("bind address: {" + NetworkAddress.format(address) + + "} is wildcard, but multiple addresses specified: this makes no sense"); } } return addresses; @@ -156,12 +157,14 @@ public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOE for (InetAddress address : addresses) { // check if its multicast: flat out mistake if (address.isMulticastAddress()) { - throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is invalid: multicast address"); + throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + + "} is invalid: multicast address"); } // check if its a wildcard address: this is only ok if its the only address! // (if it was a single wildcard address, it was replaced by step 1 above) if (address.isAnyLocalAddress()) { - throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense"); + throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + + "} is wildcard, but multiple addresses specified: this makes no sense"); } } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index f84441fbce436..b19f569481db1 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -148,7 +148,8 @@ public boolean isRecycled() { } /** - * Create a concurrent implementation that can support concurrent access from concurrencyLevel threads with little contention. + * Create a concurrent implementation that can support concurrent access from + * concurrencyLevel threads with little contention. */ public static Recycler concurrent(final Recycler.Factory factory, final int concurrencyLevel) { if (concurrencyLevel < 1) { diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index 1e305d60fea03..12c511311ea5b 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -44,7 +44,8 @@ public class BigArrays implements Releasable { public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES; public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF; - /** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */ + /** Returns the next size to grow when working with parallel arrays that + * may have different page sizes or number of bytes per element. 
*/ public static long overSize(long minTargetSize) { return overSize(minTargetSize, PAGE_SIZE_IN_BYTES / 8, 1); } @@ -345,7 +346,8 @@ private static class ObjectArrayWrapper extends AbstractArrayWrapper implemen @Override public long ramBytesUsed() { - return SHALLOW_SIZE + RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + RamUsageEstimator.NUM_BYTES_OBJECT_REF * size()); + return SHALLOW_SIZE + RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + + RamUsageEstimator.NUM_BYTES_OBJECT_REF * size()); } @SuppressWarnings("unchecked") @@ -503,7 +505,8 @@ public ByteArray resize(ByteArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public ByteArray grow(ByteArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -587,7 +590,8 @@ public IntArray resize(IntArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public IntArray grow(IntArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -638,7 +642,8 @@ public LongArray resize(LongArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public LongArray grow(LongArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -686,7 +691,8 @@ public DoubleArray resize(DoubleArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public DoubleArray grow(DoubleArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -734,7 +740,8 @@ public FloatArray resize(FloatArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public FloatArray grow(FloatArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -775,7 +782,8 @@ public ObjectArray resize(ObjectArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. 
*/ public ObjectArray grow(ObjectArray array, long minSize) { if (minSize <= array.size()) { return array; diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java index 4399ba6a8fe5b..c2f55b8d9b939 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java @@ -45,7 +45,8 @@ public synchronized boolean isCancelled() { } - /** call this will throw an exception if operation was cancelled. Override {@link #onCancel(String, Exception)} for custom failure logic */ + /** call this will throw an exception if operation was cancelled. + * Override {@link #onCancel(String, Exception)} for custom failure logic */ public synchronized void checkForCancel() { if (isCancelled()) { onCancel(reason, null); diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 391f23c4f94c0..ce1bfe87131ba 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -307,7 +307,8 @@ public static void sort(final BytesRefArray bytes, final int[] indices) { sort(new BytesRefBuilder(), new BytesRefBuilder(), bytes, indices); } - private static void sort(final BytesRefBuilder scratch, final BytesRefBuilder scratch1, final BytesRefArray bytes, final int[] indices) { + private static void sort(final BytesRefBuilder scratch, final BytesRefBuilder scratch1, + final BytesRefArray bytes, final int[] indices) { final int numValues = bytes.size(); assert indices.length >= numValues; diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index d38eb03fae3dd..abc95810ba9a9 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -59,25 +59,30 @@ public static int numberOfProcessors(final Settings settings) { return PROCESSORS_SETTING.get(settings); } - public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) { + public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, + ThreadContext contextHolder, ScheduledExecutorService timer) { return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer); } - public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) { + public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, + ThreadFactory threadFactory, ThreadContext contextHolder) { ExecutorScalingQueue queue = new ExecutorScalingQueue<>(); - EsThreadPoolExecutor executor = new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder); + EsThreadPoolExecutor executor = + new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder); queue.executor = executor; return executor; } - 
public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) { + public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, + ThreadFactory threadFactory, ThreadContext contextHolder) { BlockingQueue queue; if (queueCapacity < 0) { queue = ConcurrentCollections.newBlockingQueue(); } else { queue = new SizeBlockingQueue<>(ConcurrentCollections.newBlockingQueue(), queueCapacity); } - return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy(), contextHolder); + return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, + queue, threadFactory, new EsAbortPolicy(), contextHolder); } /** diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java index 967f0c890d270..0b2b1a5a54c9e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java @@ -246,18 +246,18 @@ private synchronized void initCause(Throwable t) { *

* Usage example:
*


-     *                                                                                             BarrierTimer timer = new BarrierTimer();
-     *                                                                                             ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer );
-     *                                                                                             ..
-     *                                                                                             barrier.await(); // starts timer when all threads trip on await
-     *                                                                                             barrier.await(); // stops  timer when all threads trip on await
-     *                                                                                             ..
-     *                                                                                             long time = timer.getTimeInNanos();
-     *                                                                                             long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration
-     *                                                                                             long secs = timer.getTimeInSeconds();    //total runtime in seconds
-     *                                                                                             ..
-     *                                                                                             timer.reset();  // reuse timer
-     *                                                                                           
+     * BarrierTimer timer = new BarrierTimer();
+     * ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer );
+     * ..
+     * barrier.await(); // starts timer when all threads trip on await
+     * barrier.await(); // stops  timer when all threads trip on await
+     * ..
+     * long time = timer.getTimeInNanos();
+     * long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration
+     * long secs = timer.getTimeInSeconds();    //total runtime in seconds
+     * ..
+     * timer.reset();  // reuse timer
+     *
      */
     public static class BarrierTimer implements Runnable {
         volatile boolean started;
diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
index f2b1c209cd9dc..9664811149567 100644
--- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
+++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java
@@ -56,14 +56,15 @@
 /**
  * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with
  * a thread. It allows to store and retrieve header information across method calls, network calls as well as threads spawned from a
- * thread that has a {@link ThreadContext} associated with. Threads spawned from a {@link org.elasticsearch.threadpool.ThreadPool} have out of the box
- * support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread that it is forking from.".
- * Network calls will also preserve the senders headers automatically.
+ * thread that has a {@link ThreadContext} associated with. Threads spawned from a {@link org.elasticsearch.threadpool.ThreadPool}
+ * have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread
+ * that it is forking from.". Network calls will also preserve the senders headers automatically.
  *

- * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by a thread pool or executor
- * being responsible for stashing and restoring the threads context. For instance if a network request is received, all headers are deserialized from the network
- * and directly added as the headers of the threads {@link ThreadContext} (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently
- * active on this thread the network code uses a try/with pattern to stash it's current context, read headers into a fresh one and once the request is handled or a handler thread
+ * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by
+ * a thread pool or executor being responsible for stashing and restoring the threads context. For instance if a network request is
+ * received, all headers are deserialized from the network and directly added as the headers of the threads {@link ThreadContext}
+ * (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently active on this thread the network code
+ * uses a try/with pattern to stash it's current context, read headers into a fresh one and once the request is handled or a handler thread
  * is forked (which in turn inherits the context) it restores the previous context. For instance:
  *

*
@@ -127,8 +128,9 @@ public StoredContext stashContext() {
     }
 
     /**
-     * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. The removed context can be
-     * restored when closing the returned {@link StoredContext}. The merge strategy is that headers that are already existing are preserved unless they are defaults.
+     * Removes the current context and resets a new context that contains a merge of the current headers and the given headers.
+     * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers
+     * that are already existing are preserved unless they are defaults.
      */
     public StoredContext stashAndMergeHeaders(Map<String, String> headers) {
         final ThreadContextStruct context = threadLocal.get();
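
[Illustration, not part of the patch: a minimal sketch of the merge strategy the reworked javadoc
above describes. The supplied headers act as defaults; names and values here are hypothetical, and
ThreadContext plus java.util.Collections imports are assumed.

    Map<String, String> defaults = Collections.singletonMap("X-Opaque-Id", "fallback-id");
    try (ThreadContext.StoredContext ignored = threadContext.stashAndMergeHeaders(defaults)) {
        // inside the block: a header already present in the context is preserved
        // (unless it was itself a default), so "fallback-id" only applies if absent
    }
    // closing the StoredContext restores the context that was removed
]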
@@ -481,7 +483,8 @@ private ThreadContextStruct putResponse(final String key, final String value, fi
                     logger.warn("Dropping a warning header, as their total size reached the maximum allowed of ["
                             + maxWarningHeaderSize + "] bytes set in ["
                             + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!");
-                    return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize);
+                    return new ThreadContextStruct(requestHeaders, responseHeaders,
+                        transientHeaders, isSystemContext, newWarningHeaderSize);
                 }
             }
 
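[Illustration, not part of the patch: the try/with stash/restore pattern the reworked class javadoc
refers to, sketched assuming a fresh ThreadContext built from empty settings (imports from
org.elasticsearch.common.util.concurrent and org.elasticsearch.common.settings assumed).

    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    threadContext.putHeader("_caller", "outer");
    try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
        // a fresh context is active here; headers written now do not leak out
        threadContext.putHeader("_caller", "inner");
    }
    // the outer context, including its "_caller" header, is active again here
]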
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
index 9c01c094b7a0d..d193cfd510823 100644
--- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
+++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -42,7 +42,8 @@ public class XContentHelper {
 
     /**
      * Creates a parser based on the bytes provided
-     * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference, XContentType)} to avoid content type auto-detection
+     * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference, XContentType)}
+     * to avoid content type auto-detection
      */
     @Deprecated
     public static XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler,
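
For comparison, the non-deprecated overload referenced in the Javadoc above takes the XContentType explicitly, which avoids content type auto-detection; a minimal sketch:

    XContentParser parser = XContentHelper.createParser(
        NamedXContentRegistry.EMPTY,             // no custom named X-Content entries
        LoggingDeprecationHandler.INSTANCE,      // log deprecated fields instead of failing
        new BytesArray("{\"field\":\"value\"}"), // the raw content
        XContentType.JSON);                      // explicit type, no auto-detection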
@@ -109,7 +110,8 @@ public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReferen
             }
             contentType = xContentType != null ? xContentType : XContentFactory.xContentType(input);
             try (InputStream stream = input) {
-                return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(XContentFactory.xContent(contentType), stream, ordered));
+                return new Tuple<>(Objects.requireNonNull(contentType),
+                    convertToMap(XContentFactory.xContent(contentType), stream, ordered));
             }
         } catch (IOException e) {
             throw new ElasticsearchParseException("Failed to parse content to map", e);
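
A short usage sketch of the method being wrapped above; the returned tuple carries the detected content type alongside the parsed map:

    Tuple<XContentType, Map<String, Object>> converted =
        XContentHelper.convertToMap(new BytesArray("{\"a\":1}"), true); // ordered = true
    XContentType type = converted.v1();       // e.g. XContentType.JSON
    Map<String, Object> map = converted.v2(); // {a=1}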
@@ -294,7 +296,8 @@ private static boolean allListValuesAreMapsOfOne(List list) {
      * auto-detection
      */
     @Deprecated
-    public static void writeRawField(String field, BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException {
+    public static void writeRawField(String field, BytesReference source, XContentBuilder builder,
+                                     ToXContent.Params params) throws IOException {
         Compressor compressor = CompressorFactory.compressor(source);
         if (compressor != null) {
             try (InputStream compressedStreamInput = compressor.streamInput(source.streamInput())) {
@@ -340,7 +343,8 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon
      * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned
      * by the {@link ToXContent#isFragment()} method returns.
      */
-    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException {
+    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params,
+                                            boolean humanReadable) throws IOException {
         try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
             builder.humanReadable(humanReadable);
             if (toXContent.isFragment()) {
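
A sketch of calling the helper above; `someToXContent` stands in for any ToXContent implementation, and fragments are wrapped in an anonymous object as the Javadoc describes:

    BytesReference bytes = XContentHelper.toXContent(
        someToXContent,              // any ToXContent implementation (hypothetical instance)
        XContentType.JSON,
        ToXContent.EMPTY_PARAMS,
        true);                       // humanReadable output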
diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
index 78c3963bd0429..5dcc811b0c6be 100644
--- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -94,7 +94,8 @@ public void testNewPolygon_coordinate() {
 
     public void testNewPolygon_coordinates() {
         Polygon polygon = new PolygonBuilder(new CoordinatesBuilder()
-                .coordinates(new Coordinate(-45, 30), new Coordinate(45, 30), new Coordinate(45, -30), new Coordinate(-45, -30), new Coordinate(-45, 30))
+                .coordinates(new Coordinate(-45, 30), new Coordinate(45, 30),
+                    new Coordinate(45, -30), new Coordinate(-45, -30), new Coordinate(-45, 30))
                 ).toPolygon();
 
         LineString exterior = polygon.getExteriorRing();
diff --git a/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java b/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
index e3c085f032830..ffe81fff5b634 100644
--- a/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
@@ -34,8 +34,10 @@ private void assertHash(String expected, String test, MessageDigest messageDiges
     public void testMd5() throws Exception {
         assertHash("d41d8cd98f00b204e9800998ecf8427e", "", MessageDigests.md5());
         assertHash("900150983cd24fb0d6963f7d28e17f72", "abc", MessageDigests.md5());
-        assertHash("8215ef0796a20bcaaae116d3876c664a", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.md5());
-        assertHash("7707d6ae4e027c70eea2a935c2296f21", new String(new char[1000000]).replace("\0", "a"), MessageDigests.md5());
+        assertHash("8215ef0796a20bcaaae116d3876c664a",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.md5());
+        assertHash("7707d6ae4e027c70eea2a935c2296f21",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.md5());
         assertHash("9e107d9d372bb6826bd81d3542a419d6", "The quick brown fox jumps over the lazy dog", MessageDigests.md5());
         assertHash("1055d3e698d289f2af8663725127bd4b", "The quick brown fox jumps over the lazy cog", MessageDigests.md5());
     }
@@ -43,8 +45,10 @@ public void testMd5() throws Exception {
     public void testSha1() throws Exception {
         assertHash("da39a3ee5e6b4b0d3255bfef95601890afd80709", "", MessageDigests.sha1());
         assertHash("a9993e364706816aba3e25717850c26c9cd0d89d", "abc", MessageDigests.sha1());
-        assertHash("84983e441c3bd26ebaae4aa1f95129e5e54670f1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha1());
-        assertHash("34aa973cd4c4daa4f61eeb2bdbad27316534016f", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha1());
+        assertHash("84983e441c3bd26ebaae4aa1f95129e5e54670f1",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha1());
+        assertHash("34aa973cd4c4daa4f61eeb2bdbad27316534016f",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha1());
         assertHash("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12", "The quick brown fox jumps over the lazy dog", MessageDigests.sha1());
         assertHash("de9f2c7fd25e1b3afad3e85a0bd17d9b100db4b3", "The quick brown fox jumps over the lazy cog", MessageDigests.sha1());
     }
@@ -52,10 +56,14 @@ public void testSha1() throws Exception {
     public void testSha256() throws Exception {
         assertHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "", MessageDigests.sha256());
         assertHash("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc", MessageDigests.sha256());
-        assertHash("248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha256());
-        assertHash("cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha256());
-        assertHash("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592", "The quick brown fox jumps over the lazy dog", MessageDigests.sha256());
-        assertHash("e4c4d8f3bf76b692de791a173e05321150f7a345b46484fe427f6acc7ecc81be", "The quick brown fox jumps over the lazy cog", MessageDigests.sha256());
+        assertHash("248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha256());
+        assertHash("cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha256());
+        assertHash("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592",
+            "The quick brown fox jumps over the lazy dog", MessageDigests.sha256());
+        assertHash("e4c4d8f3bf76b692de791a173e05321150f7a345b46484fe427f6acc7ecc81be",
+            "The quick brown fox jumps over the lazy cog", MessageDigests.sha256());
     }
 
     public void testToHexString() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java b/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
index 0b00353f98ab5..60609dc29930f 100644
--- a/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
@@ -78,7 +78,8 @@ public void testSplittingDot() {
     public void testValidSpecificCases() {
         List<Tuple<String, long[]>> cases = new ArrayList<>();
         cases.add(new Tuple<>("192.168.0.0/24", new long[]{(192L << 24) + (168 << 16), (192L << 24) + (168 << 16) + (1 << 8)}));
-        cases.add(new Tuple<>("192.168.128.0/17", new long[]{(192L << 24) + (168 << 16) + (128 << 8), (192L << 24) + (168 << 16) + (128 << 8) + (1 << 15)}));
+        cases.add(new Tuple<>("192.168.128.0/17",
+            new long[]{(192L << 24) + (168 << 16) + (128 << 8), (192L << 24) + (168 << 16) + (128 << 8) + (1 << 15)}));
         cases.add(new Tuple<>("128.0.0.0/1", new long[]{128L << 24, (128L << 24) + (1L << 31)})); // edge case
         cases.add(new Tuple<>("0.0.0.0/0", new long[]{0, 1L << 32})); // edge case
         cases.add(new Tuple<>("0.0.0.0/1", new long[]{0, 1L << 31})); // edge case
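
The long[] pairs above encode [lower, upper) address ranges, with each IPv4 address packed into a long; a worked example for the /24 case:

    long lower = (192L << 24) + (168L << 16); // 192.168.0.0 packed into a long
    long size = 1L << (32 - 24);              // a /24 spans 2^8 = 256 addresses
    long upper = lower + size;                // exclusive upper bound: lower + (1 << 8)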
diff --git a/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
index eafb7c69b8d9d..13480122d2fd8 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -53,8 +53,10 @@ public void testDistanceUnitParsing() {
         double testValue = 12345.678;
         for (DistanceUnit unit : DistanceUnit.values()) {
             assertThat("Unit can be parsed from '" + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
-            assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
-            assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
+            assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'",
+                DistanceUnit.fromString(unit.toString()), equalTo(unit));
+            assertThat("Value can be parsed from '" + testValue + unit.toString() + "'",
+                DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
         }
     }
 
diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
index 520f80fecac44..026c9a2e078a4 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
+++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
@@ -67,7 +67,8 @@ public void testParseFromXContent() throws IOException {
                 try (XContentParser parser = createParser(json)) {
                     assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
                     assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
-                    assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER), equalTo(XContentParser.Token.VALUE_STRING)));
+                    assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER),
+                        equalTo(XContentParser.Token.VALUE_STRING)));
                     Fuzziness fuzziness = Fuzziness.parse(parser);
                     if (value.intValue() >= 1) {
                         assertThat(fuzziness.asDistance(), equalTo(Math.min(2, value.intValue())));
diff --git a/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java b/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
index 9210565a10482..0c1c5bbbcb74e 100644
--- a/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
@@ -32,7 +32,8 @@ private BigArrays randombigArrays() {
 
     public void testDuel() {
         final LongObjectHashMap<Object> map1 = new LongObjectHashMap<>();
-        final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays());
+        final LongObjectPagedHashMap<Object> map2 =
+            new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays());
         final int maxKey = randomIntBetween(1, 10000);
         final int iters = scaledRandomIntBetween(10000, 100000);
         for (int i = 0; i < iters; ++i) {
diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
index a0fdcbf51ca1d..ff916c91613dc 100644
--- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
@@ -172,7 +172,8 @@ public void testScaleUp() throws Exception {
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);
 
         ThreadPoolExecutor pool =
-                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"), threadContext);
+                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), randomTimeUnit(),
+                    EsExecutors.daemonThreadFactory("test"), threadContext);
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
 
@@ -209,7 +210,8 @@ public void testScaleDown() throws Exception {
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);
 
         final ThreadPoolExecutor pool =
-                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"), threadContext);
+                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS,
+                    EsExecutors.daemonThreadFactory("test"), threadContext);
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
 
diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
index 1eacb4cb18cee..fa3868ec46f37 100644
--- a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
@@ -159,7 +159,8 @@ public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
     }
 
     public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()), holder, null);
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(),
+            EsExecutors.daemonThreadFactory(getTestName()), holder, null);
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
index 07338d9286b70..a281d453e0764 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -123,7 +123,8 @@ public void testRaw() throws IOException {
             xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput());
             xContentBuilder.field("test1", "value1");
             xContentBuilder.endObject();
-            assertThat(Strings.toString(xContentBuilder), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+            assertThat(Strings.toString(xContentBuilder),
+                equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
         }
         {
             XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
@@ -133,7 +134,8 @@ public void testRaw() throws IOException {
             xContentBuilder.rawField("foo1", new BytesArray("{\"test\":\"value\"}").streamInput());
             xContentBuilder.field("test1", "value1");
             xContentBuilder.endObject();
-            assertThat(Strings.toString(xContentBuilder), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+            assertThat(Strings.toString(xContentBuilder),
+                equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
         }
     }
 
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
index b4d7cb11529b3..b0536fa908cf0 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
@@ -34,7 +34,8 @@ public class FilterPathGeneratorFilteringTests extends ESTestCase {
     private final JsonFactory JSON_FACTORY = new JsonFactory();
 
     public void testInclusiveFilters() throws Exception {
-        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
+        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
 
         assertResult(SAMPLE, "a", true, "{'a':0}");
         assertResult(SAMPLE, "b", true, "{'b':true}");
@@ -79,48 +80,80 @@ public void testInclusiveFilters() throws Exception {
     }
 
     public void testExclusiveFilters() throws Exception {
-        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
-
-        assertResult(SAMPLE, "a", false, "{'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "b", false, "{'a':0,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "c", false, "{'a':0,'b':true,'d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "d", false, "{'a':0,'b':true,'c':'c_value','e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
+
+        assertResult(SAMPLE, "a", false, "{'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "b", false, "{'a':0,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "c", false, "{'a':0,'b':true,'d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "d", false, "{'a':0,'b':true,'c':'c_value','e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
         assertResult(SAMPLE, "e", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "h", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "z", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-
-        assertResult(SAMPLE, "e.f1", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.f2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.f*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-
-        assertResult(SAMPLE, "h.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "h.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "*.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "h.*.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "**.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "**.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "h", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "z", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+
+        assertResult(SAMPLE, "e.f1", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.f2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.f*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'g1':'g1_value','g2':'g2_value'}]," +
+            "'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}]," +
+            "'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+
+        assertResult(SAMPLE, "h.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "h.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "*.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "h.*.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "**.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "**.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'}," +
+            "{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
 
     }