From 31810837812b7b554a021d45a6cc920c7bdc1dcf Mon Sep 17 00:00:00 2001 From: markharwood Date: Fri, 26 Oct 2018 14:21:35 +0100 Subject: [PATCH 1/9] HLRC - add support for source exists API (#34519) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HLRC - add support for source exists API API re-uses the GetRequest object (following the precedent set by the plain “exists” api). Relates to #27205 --- .../client/RequestConverters.java | 12 ++++ .../client/RestHighLevelClient.java | 26 +++++++++ .../java/org/elasticsearch/client/CrudIT.java | 55 +++++++++++++++++++ .../client/RestHighLevelClientTests.java | 1 - .../high-level/document/exists.asciidoc | 7 +++ 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 106caea027e27..2ff944b0a5343 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -261,6 +261,18 @@ private static Request getStyleRequest(String method, GetRequest getRequest) { return request; } + + static Request sourceExists(GetRequest getRequest) { + Request request = new Request(HttpHead.METHOD_NAME, endpoint(getRequest.index(), getRequest.type(), getRequest.id(), "_source")); + + Params parameters = new Params(request); + parameters.withPreference(getRequest.preference()); + parameters.withRouting(getRequest.routing()); + parameters.withRefresh(getRequest.refresh()); + parameters.withRealtime(getRequest.realtime()); + // Version params are not currently supported by the source exists API so are not passed + return request; + } static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_mget"); diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 342e3efbb6a35..7e8a965361426 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -727,6 +727,32 @@ public final void existsAsync(GetRequest getRequest, RequestOptions options, Act emptySet()); } + /** + * Checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. + * See Source exists API + * on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return true if the document and _source field exists, false otherwise + * @throws IOException in case there is a problem sending the request + */ + public boolean existsSource(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequest(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Asynchronously checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. + * See Source exists API + * on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsync(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, listener, + emptySet()); + } + /** * Index a document using the Index API. 
* See Index API on elastic.co diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e679a85f67f0c..1dd27cff0d92a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -194,6 +194,61 @@ public void testExists() throws IOException { assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } } + + public void testSourceExists() throws IOException { + { + GetRequest getRequest = new GetRequest("index", "type", "id"); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + IndexRequest index = new IndexRequest("index", "type", "id"); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index, RequestOptions.DEFAULT); + { + GetRequest getRequest = new GetRequest("index", "type", "id"); + assertTrue(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + { + GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + { + GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + } + + public void testSourceDoesNotExist() throws IOException { + final String noSourceIndex = "no_source"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + String mapping = "\"_doc\": { \"_source\": {\n" + + " \"enabled\": false\n" + + " } }"; + createIndex(noSourceIndex, settings, 
mapping); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(noSourceIndex, "_doc", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(noSourceIndex, "_doc", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + GetRequest getRequest = new GetRequest(noSourceIndex, "_doc", "1"); + assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::existsSource, highLevelClient()::existsSourceAsync)); + } + } public void testGet() throws IOException { { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 8f4ec4cc0ccca..d40c3196e54f4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -650,7 +650,6 @@ public void testApiNamingConventions() throws Exception { "cluster.remote_info", "count", "create", - "exists_source", "get_source", "indices.delete_alias", "indices.delete_template", diff --git a/docs/java-rest/high-level/document/exists.asciidoc b/docs/java-rest/high-level/document/exists.asciidoc index ac6968d1f3752..3a09203bab6c6 100644 --- a/docs/java-rest/high-level/document/exists.asciidoc +++ b/docs/java-rest/high-level/document/exists.asciidoc @@ -29,3 +29,10 @@ include-tagged::{doc-tests-file}[{api}-request] <5> Disable fetching stored fields. include::../execution.asciidoc[] + + +==== Source exists request +A variant of the exists request is `existsSource` method which has the additional check +that the document in question has stored the `source`. 
If the mapping for the index has opted +to remove support for storing JSON source in documents then this method will return false +for documents in this index. From 5c2c1f44c895129023bda848e718facaa6b220af Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Fri, 26 Oct 2018 08:01:38 -0600 Subject: [PATCH 2/9] [Style] Fix line lengths in action.admin.indices (#34890) Clean up lines over 140 characters in the the `org.elasticsearch.action.admin.indices` packages --- .../resources/checkstyle_suppressions.xml | 58 ------------------ .../alias/TransportIndicesAliasesAction.java | 6 +- .../exists/TransportAliasesExistAction.java | 9 ++- .../alias/get/BaseAliasesRequestBuilder.java | 4 +- .../alias/get/TransportGetAliasesAction.java | 9 ++- .../analyze/TransportAnalyzeAction.java | 60 ++++++++++++------- .../ClearIndicesCacheRequestBuilder.java | 3 +- .../close/TransportCloseIndexAction.java | 12 ++-- .../create/CreateIndexRequestBuilder.java | 3 +- .../create/TransportCreateIndexAction.java | 9 ++- .../delete/DeleteIndexRequestBuilder.java | 3 +- .../delete/TransportDeleteIndexAction.java | 9 ++- .../indices/IndicesExistsRequestBuilder.java | 3 +- .../indices/TransportIndicesExistsAction.java | 15 +++-- .../types/TransportTypesExistsAction.java | 12 ++-- .../types/TypesExistsRequestBuilder.java | 3 +- .../indices/flush/TransportFlushAction.java | 6 +- .../flush/TransportShardFlushAction.java | 3 +- .../forcemerge/ForceMergeRequestBuilder.java | 3 +- .../forcemerge/TransportForceMergeAction.java | 7 ++- .../get/GetFieldMappingsRequestBuilder.java | 3 +- .../get/GetMappingsRequestBuilder.java | 3 +- .../TransportGetFieldMappingsIndexAction.java | 3 +- .../put/TransportPutMappingAction.java | 16 +++-- .../open/TransportOpenIndexAction.java | 9 ++- .../recovery/TransportRecoveryAction.java | 4 +- .../refresh/TransportRefreshAction.java | 6 +- .../admin/indices/segments/IndexSegments.java | 5 +- .../IndicesSegmentsRequestBuilder.java | 3 +- 
.../TransportIndicesSegmentsAction.java | 13 ++-- .../get/GetSettingsRequestBuilder.java | 3 +- .../get/TransportGetSettingsAction.java | 6 +- .../put/TransportUpdateSettingsAction.java | 14 +++-- .../put/UpdateSettingsRequestBuilder.java | 3 +- .../IndicesShardStoreRequestBuilder.java | 5 +- .../shards/IndicesShardStoresResponse.java | 3 +- .../TransportIndicesShardStoresAction.java | 53 ++++++++++------ .../admin/indices/stats/IndexStats.java | 3 +- .../stats/IndicesStatsRequestBuilder.java | 3 +- .../stats/TransportIndicesStatsAction.java | 7 ++- .../DeleteIndexTemplateRequestBuilder.java | 3 +- .../TransportDeleteIndexTemplateAction.java | 35 ++++++----- .../get/GetIndexTemplatesRequestBuilder.java | 6 +- .../get/TransportGetIndexTemplatesAction.java | 9 ++- .../put/TransportPutIndexTemplateAction.java | 9 ++- .../upgrade/get/IndexUpgradeStatus.java | 5 +- .../get/TransportUpgradeStatusAction.java | 13 ++-- .../get/UpgradeStatusRequestBuilder.java | 3 +- .../upgrade/post/TransportUpgradeAction.java | 14 +++-- .../post/TransportUpgradeSettingsAction.java | 14 +++-- .../post/UpgradeSettingsRequestBuilder.java | 3 +- .../query/TransportValidateQueryAction.java | 9 ++- .../query/ValidateQueryRequestBuilder.java | 3 +- .../indices/TransportAnalyzeActionTests.java | 18 ++++-- .../clear/ClearIndicesCacheBlocksIT.java | 6 +- .../indices/flush/SyncedFlushUnitTests.java | 13 ++-- .../action/admin/indices/get/GetIndexIT.java | 3 +- .../shards/IndicesShardStoreRequestIT.java | 9 ++- .../IndicesShardStoreResponseTests.java | 40 +++++++++---- 59 files changed, 372 insertions(+), 245 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 592c1512d60cf..1297b305ea0c4 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -78,58 +78,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - @@ -371,12 +319,6 @@ - - - - - - diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index d6ecaf8b2c9f3..c0753899bc048 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -61,7 +61,8 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, + final ActionListener listener) { //Expand the indices names List actions = request.aliasActions(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 6b77b9a39e97e..998b49623e5ae 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -36,8 +36,10 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction> extends MasterNodeReadOperationRequestBuilder { +public abstract class BaseAliasesRequestBuilder> + extends MasterNodeReadOperationRequestBuilder { public BaseAliasesRequestBuilder(ElasticsearchClient client, Action action, String... 
aliases) { super(client, action, new GetAliasesRequest(aliases)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 2b71e85a53761..faa075afca8fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -40,8 +40,10 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction tokenFilterFactoryList = - parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true); + parseTokenFilterFactories(request, indexSettings, analysisRegistry, environment, + new Tuple<>(keywordTokenizerName, keywordTokenizerFactory), charFilterFactoryList, true); analyzer = new CustomAnalyzer("keyword_for_normalizer", keywordTokenizerFactory, @@ -311,7 +315,8 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy TokenFilterFactory[] tokenFilterFactories = customAnalyzer.tokenFilters(); String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length]; - TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0]; + TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? 
+ tokenFilterFactories.length : 0]; TokenListCreator tokenizerTokenListCreator = new TokenListCreator(maxTokenCount); @@ -348,14 +353,18 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy } } - DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length]; + DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = + new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length]; + if (charFilterFactories != null) { for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) { charFilteredLists[charFilterIndex] = new DetailAnalyzeResponse.CharFilteredText( charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]); } } - DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; + DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = + new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length]; + if (tokenFilterFactories != null) { for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) { tokenFilterLists[tokenFilterIndex] = new DetailAnalyzeResponse.AnalyzeTokenList( @@ -382,7 +391,9 @@ private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analy return detailResponse; } - private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, int current) { + private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, + TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, + int current) { Reader reader = new StringReader(source); for (CharFilterFactory charFilterFactory : charFilterFactories) { reader = charFilterFactory.create(reader); @@ 
-457,7 +468,8 @@ private void analyze(TokenStream stream, Analyzer analyzer, String field, Set extractExtendedAttributes(TokenStream stream, return extendedAttributes; } - private static List parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, - Environment environment, boolean normalizer) throws IOException { + private static List parseCharFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + AnalysisRegistry analysisRegistry, Environment environment, + boolean normalizer) throws IOException { List charFilterFactoryList = new ArrayList<>(); if (request.charFilters() != null && request.charFilters().size() > 0) { List charFilters = request.charFilters(); @@ -536,7 +549,8 @@ private static List parseCharFilterFactories(AnalyzeRequest r throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]"); } // Need to set anonymous "name" of char_filter - charFilterFactory = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter", settings); + charFilterFactory = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter", + settings); } else { AnalysisModule.AnalysisProvider charFilterFactoryFactory; if (indexSettings == null) { @@ -608,9 +622,11 @@ public TokenFilterFactory apply(String s) { } } - private static List parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, AnalysisRegistry analysisRegistry, - Environment environment, Tuple tokenizerFactory, - List charFilterFactoryList, boolean normalizer) throws IOException { + private static List parseTokenFilterFactories(AnalyzeRequest request, IndexSettings indexSettings, + AnalysisRegistry analysisRegistry, Environment environment, + Tuple tokenizerFactory, + List charFilterFactoryList, + boolean normalizer) throws IOException { List tokenFilterFactoryList = new ArrayList<>(); 
DeferredTokenFilterRegistry deferredRegistry = new DeferredTokenFilterRegistry(analysisRegistry, indexSettings); if (request.tokenFilters() != null && request.tokenFilters().size() > 0) { @@ -630,7 +646,8 @@ private static List parseTokenFilterFactories(AnalyzeRequest throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]"); } // Need to set anonymous "name" of tokenfilter - tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", settings); + tokenFilterFactory = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter", + settings); tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList, tokenFilterFactoryList, deferredRegistry); @@ -650,8 +667,8 @@ private static List parseTokenFilterFactories(AnalyzeRequest Settings settings = AnalysisRegistry.getSettingsFromIndexSettings(indexSettings, AnalysisRegistry.INDEX_ANALYSIS_FILTER + "." 
+ tokenFilter.name); tokenFilterFactory = tokenFilterFactoryFactory.get(indexSettings, environment, tokenFilter.name, settings); - tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), charFilterFactoryList, - tokenFilterFactoryList, deferredRegistry); + tokenFilterFactory = tokenFilterFactory.getChainAwareTokenFilterFactory(tokenizerFactory.v2(), + charFilterFactoryList, tokenFilterFactoryList, deferredRegistry); } } if (tokenFilterFactory == null) { @@ -709,7 +726,8 @@ private static Tuple parseTokenizerFactory(AnalyzeRequ return new Tuple<>(name, tokenizerFactory); } - private static TokenizerFactory getTokenizerFactory(AnalysisRegistry analysisRegistry, Environment environment, String name) throws IOException { + private static TokenizerFactory getTokenizerFactory(AnalysisRegistry analysisRegistry, Environment environment, + String name) throws IOException { AnalysisModule.AnalysisProvider tokenizerFactoryFactory; TokenizerFactory tokenizerFactory; tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(name); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index 8cfe3d7b9096c..7a2bd9fd0f484 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder { +public class ClearIndicesCacheRequestBuilder + extends BroadcastOperationRequestBuilder { public ClearIndicesCacheRequestBuilder(ElasticsearchClient client, 
ClearIndicesCacheAction action) { super(client, action, new ClearIndicesCacheRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index f2e284656e590..c612beea59520 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -58,7 +58,8 @@ public TransportCloseIndexAction(Settings settings, TransportService transportSe ThreadPool threadPool, MetaDataIndexStateService indexStateService, ClusterSettings clusterSettings, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new); + super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + CloseIndexRequest::new); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings); @@ -84,18 +85,21 @@ protected AcknowledgedResponse newResponse() { protected void doExecute(Task task, CloseIndexRequest request, ActionListener listener) { destructiveOperations.failDestructive(request.indices()); if (closeIndexEnabled == false) { - throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); + throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + + ": true] to enable it. 
NOTE: closed indices still consume a significant amount of diskspace"); } super.doExecute(task, request, listener); } @Override protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, + indexNameExpressionResolver.concreteIndexNames(state, request)); } @Override - protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) { + protected void masterOperation(final CloseIndexRequest request, final ClusterState state, + final ActionListener listener) { final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); if (concreteIndices == null || concreteIndices.length == 0) { listener.onResponse(new AcknowledgedResponse(true)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java index d2593e7e94be3..93b4184f958bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -34,7 +34,8 @@ /** * Builder for a create index request */ -public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder { +public class CreateIndexRequestBuilder + extends AcknowledgedRequestBuilder { public CreateIndexRequestBuilder(ElasticsearchClient client, CreateIndexAction action) { super(client, action, new CreateIndexRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index e4384745d36e8..58467b6cc6b62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -44,7 +44,8 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final CreateIndexRequest request, final ClusterState state, + final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; } final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index()); - final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) + final CreateIndexClusterStateUpdateRequest updateRequest = + new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index()) .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .settings(request.settings()).mappings(request.mappings()) .aliases(request.aliases()) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 376a115b19627..10663a8dece39 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -24,7 +24,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ElasticsearchClient; -public class DeleteIndexRequestBuilder extends AcknowledgedRequestBuilder { +public class DeleteIndexRequestBuilder + extends AcknowledgedRequestBuilder { public 
DeleteIndexRequestBuilder(ElasticsearchClient client, DeleteIndexAction action, String... indices) { super(client, action, new DeleteIndexRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 62421da891608..a7080209eca4d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -53,8 +53,10 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, + final ActionListener listener) { final Set concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request))); if (concreteIndices.isEmpty()) { listener.onResponse(new AcknowledgedResponse(true)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 87da4627047ef..39a2ca7ef0a13 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class IndicesExistsRequestBuilder + extends MasterNodeReadOperationRequestBuilder { public IndicesExistsRequestBuilder(ElasticsearchClient client, IndicesExistsAction action, 
String... indices) { super(client, action, new IndicesExistsRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 2310c463581a0..5f486210b617c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -41,8 +41,10 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction< @Inject public TransportIndicesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, indexNameExpressionResolver); + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, IndicesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesExistsRequest::new, + indexNameExpressionResolver); } @Override @@ -59,12 +61,15 @@ protected IndicesExistsResponse newResponse() { @Override protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) { //make sure through indices options that the concrete indices call never throws IndexMissingException - IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); + 
IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), + request.indicesOptions().expandWildcardsClosed()); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, + indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices())); } @Override - protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener listener) { + protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, + final ActionListener listener) { boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index e63a27bef1818..223e738ad2068 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -40,8 +40,10 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction listener) { + protected void masterOperation(final TypesExistsRequest request, final ClusterState state, + final ActionListener listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java index f73dcdec22406..607a423605bfc 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TypesExistsRequestBuilder.java @@ -27,7 +27,8 @@ * A builder for {@link TypesExistsRequest}. */ @Deprecated -public class TypesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class TypesExistsRequestBuilder + extends MasterNodeReadOperationRequestBuilder { /** * @param indices What indices to check for types diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 7df54c1f123a1..35e19967a3e3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -35,13 +35,15 @@ /** * Flush Action. */ -public class TransportFlushAction extends TransportBroadcastReplicationAction { +public class TransportFlushAction + extends TransportBroadcastReplicationAction { @Inject public TransportFlushAction(Settings settings, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportShardFlushAction replicatedFlushAction) { - super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction); + super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, + replicatedFlushAction); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index ed1819a1d2480..344a817fa8b83 
100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -32,7 +32,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportShardFlushAction extends TransportReplicationAction { +public class TransportShardFlushAction + extends TransportReplicationAction { public static final String NAME = FlushAction.NAME + "[s]"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 285ef99a70a0d..29f6891fd4bc2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -29,7 +29,8 @@ * merge down to. By default, will cause the force merge process to merge down * to half the configured number of segments. */ -public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder { +public class ForceMergeRequestBuilder + extends BroadcastOperationRequestBuilder { public ForceMergeRequestBuilder(ElasticsearchClient client, ForceMergeAction action) { super(client, action, new ForceMergeRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index 94357575a9f72..621e2b870e90d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -43,7 +43,8 @@ /** * ForceMerge index/indices action. 
*/ -public class TransportForceMergeAction extends TransportBroadcastByNodeAction { +public class TransportForceMergeAction + extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @@ -62,7 +63,9 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, + List responses, List shardFailures, + ClusterState clusterState) { return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java index a80ba8bf2ce8d..cbd0539c24485 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsRequestBuilder.java @@ -25,7 +25,8 @@ import org.elasticsearch.common.util.ArrayUtils; /** A helper class to build {@link GetFieldMappingsRequest} objects */ -public class GetFieldMappingsRequestBuilder extends ActionRequestBuilder { +public class GetFieldMappingsRequestBuilder + extends ActionRequestBuilder { public GetFieldMappingsRequestBuilder(ElasticsearchClient client, GetFieldMappingsAction action, String... 
indices) { super(client, action, new GetFieldMappingsRequest().indices(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 7ecb67139539f..f2e49ece9ea14 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.master.info.ClusterInfoRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class GetMappingsRequestBuilder extends ClusterInfoRequestBuilder { +public class GetMappingsRequestBuilder + extends ClusterInfoRequestBuilder { public GetMappingsRequestBuilder(ElasticsearchClient client, GetMappingsAction action, String... indices) { super(client, action, new GetMappingsRequest().indices(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index f9fc5880bbb5b..3ecd814194e1a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -58,7 +58,8 @@ /** * Transport action used to retrieve the mappings related to fields that belong to a specific index */ -public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAction { +public class TransportGetFieldMappingsIndexAction + extends TransportSingleShardAction { private static final String ACTION_NAME = GetFieldMappingsAction.NAME + "[index]"; diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index 9f29ffe4883bc..e18cd087666bd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -49,7 +49,8 @@ public class TransportPutMappingAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final PutMappingRequest request, final ClusterState state, + final ActionListener listener) { try { - final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()}; + final Index[] concreteIndices = request.getConcreteIndex() == null ? + indexNameExpressionResolver.concreteIndices(state, request) + : new Index[] {request.getConcreteIndex()}; PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices).type(request.type()) @@ -93,12 +97,14 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", concreteIndices, request.type()), t); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", + concreteIndices, request.type()), t); listener.onFailure(t); } }); } catch (IndexNotFoundException ex) { - logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", request.indices(), request.type()), ex); + logger.debug(() -> new ParameterizedMessage("failed to put mappings on indices [{}], type [{}]", + 
request.indices(), request.type()), ex); throw ex; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 1e89244b67644..0c3863e71433b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -51,7 +51,8 @@ public TransportOpenIndexAction(Settings settings, TransportService transportSer ThreadPool threadPool, MetaDataIndexStateService indexStateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { - super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new); + super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + OpenIndexRequest::new); this.indexStateService = indexStateService; this.destructiveOperations = destructiveOperations; } @@ -75,11 +76,13 @@ protected void doExecute(Task task, OpenIndexRequest request, ActionListener listener) { + protected void masterOperation(final OpenIndexRequest request, final ClusterState state, + final ActionListener listener) { final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); if (concreteIndices == null || concreteIndices.length == 0) { listener.onResponse(new OpenIndexResponse(true, true)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index dc0a9adb0753c..eaeeaa6758079 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -69,7 +69,9 @@ protected RecoveryState readShardResult(StreamInput in) throws IOException { @Override - protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { + protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, + List responses, List shardFailures, + ClusterState clusterState) { Map> shardResponses = new HashMap<>(); for (RecoveryState recoveryState : responses) { if (recoveryState == null) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 05a72c044348c..5d8ce537eeea0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -37,14 +37,16 @@ /** * Refresh action. 
*/ -public class TransportRefreshAction extends TransportBroadcastReplicationAction { +public class TransportRefreshAction + extends TransportBroadcastReplicationAction { @Inject public TransportRefreshAction(Settings settings, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportShardRefreshAction shardRefreshAction) { - super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction); + super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters, + indexNameExpressionResolver, shardRefreshAction); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java index 42e18a1fddc0f..94e85a6a73af0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexSegments.java @@ -45,7 +45,8 @@ public class IndexSegments implements Iterable { } indexShards = new HashMap<>(); for (Map.Entry> entry : tmpIndexShards.entrySet()) { - indexShards.put(entry.getKey(), new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardSegments[entry.getValue().size()]))); + indexShards.put(entry.getKey(), new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(), + entry.getValue().toArray(new ShardSegments[entry.getValue().size()]))); } } @@ -65,4 +66,4 @@ public Map getShards() { public Iterator iterator() { return indexShards.values().iterator(); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java index ce4a5705168b2..27ec8fb6a716e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class IndicesSegmentsRequestBuilder extends BroadcastOperationRequestBuilder { +public class IndicesSegmentsRequestBuilder + extends BroadcastOperationRequestBuilder { public IndicesSegmentsRequestBuilder(ElasticsearchClient client, IndicesSegmentsAction action) { super(client, action, new IndicesSegmentsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index 6b624e6baa792..e50748ed27b4d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -41,13 +41,15 @@ import java.io.IOException; import java.util.List; -public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction { +public class TransportIndicesSegmentsAction + extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @Inject public TransportIndicesSegmentsAction(Settings settings, ClusterService clusterService, TransportService transportService, - IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + IndicesService indicesService, ActionFilters actionFilters, + IndexNameExpressionResolver 
indexNameExpressionResolver) { super(settings, IndicesSegmentsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, IndicesSegmentsRequest::new, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; @@ -77,8 +79,11 @@ protected ShardSegments readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List results, List shardFailures, ClusterState clusterState) { - return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures); + protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, + List results, List shardFailures, + ClusterState clusterState) { + return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, + shardFailures); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index 2fff2eca0c263..c4b58e90e6ebf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -24,7 +24,8 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; -public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class GetSettingsRequestBuilder + extends MasterNodeReadOperationRequestBuilder { public GetSettingsRequestBuilder(ElasticsearchClient client, GetSettingsAction action, String... 
indices) { super(client, action, new GetSettingsRequest().indices(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 0ffc7efa9524c..a758776cd155d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -50,7 +50,8 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction listener) { + protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, + final ActionListener listener) { final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 834f3ba30148e..af29429785b19 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -31,7 +31,8 @@ /** * Builder for an update index settings request */ -public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { +public class UpdateSettingsRequestBuilder + extends AcknowledgedRequestBuilder { public UpdateSettingsRequestBuilder(ElasticsearchClient client, UpdateSettingsAction action, String... 
indices) { super(client, action, new UpdateSettingsRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index cf38feae56f13..f44c24144030b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -28,7 +28,10 @@ /** * Request builder for {@link IndicesShardStoresRequest} */ -public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder< + IndicesShardStoresRequest, + IndicesShardStoresResponse, + IndicesShardStoreRequestBuilder> { public IndicesShardStoreRequestBuilder(ElasticsearchClient client, Action action, String... 
indices) { super(client, action, new IndicesShardStoresRequest(indices)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 72aeb7f757528..d87de21bc48d8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -276,7 +276,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws private ImmutableOpenMap>> storeStatuses; private List failures; - public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, List failures) { + public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, + List failures) { this.storeStatuses = storeStatuses; this.failures = failures; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 0741965f5e5c9..b64c376140266 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -59,17 +59,21 @@ import java.util.concurrent.ConcurrentLinkedQueue; /** - * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific indices - * and fetches store information from all the nodes using {@link TransportNodesListGatewayStartedShards} + * Transport action that reads the cluster state for shards with the requested criteria (see {@link ClusterHealthStatus}) of specific + * indices and fetches store information from all the nodes using {@link 
TransportNodesListGatewayStartedShards} */ -public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction { +public class TransportIndicesShardStoresAction + extends TransportMasterNodeReadAction { private final TransportNodesListGatewayStartedShards listShardStoresInfo; @Inject - public TransportIndicesShardStoresAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, TransportNodesListGatewayStartedShards listShardStoresInfo) { - super(settings, IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesShardStoresRequest::new, indexNameExpressionResolver); + public TransportIndicesShardStoresAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportNodesListGatewayStartedShards listShardStoresInfo) { + super(settings, IndicesShardStoresAction.NAME, transportService, clusterService, threadPool, actionFilters, + IndicesShardStoresRequest::new, indexNameExpressionResolver); this.listShardStoresInfo = listShardStoresInfo; } @@ -84,7 +88,8 @@ protected IndicesShardStoresResponse newResponse() { } @Override - protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener listener) { + protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, + ActionListener listener) { final RoutingTable routingTables = state.routingTable(); final RoutingNodes routingNodes = state.getRoutingNodes(); final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); @@ -116,7 +121,8 @@ protected void masterOperation(IndicesShardStoresRequest request, ClusterState s @Override protected ClusterBlockException 
checkBlock(IndicesShardStoresRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, + indexNameExpressionResolver.concreteIndexNames(state, request)); } private class AsyncShardStoresInfoFetches { @@ -127,7 +133,8 @@ private class AsyncShardStoresInfoFetches { private CountDown expectedOps; private final Queue fetchResponses; - AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, Set shardIds, ActionListener listener) { + AsyncShardStoresInfoFetches(DiscoveryNodes nodes, RoutingNodes routingNodes, Set shardIds, + ActionListener listener) { this.nodes = nodes; this.routingNodes = routingNodes; this.shardIds = shardIds; @@ -154,7 +161,8 @@ private class InternalAsyncFetch extends AsyncShardFetch responses, List failures, long fetchingRound) { + protected synchronized void processAsyncFetch(List responses, List failures, + long fetchingRound) { fetchResponses.add(new Response(shardId, responses, failures)); if (expectedOps.countDown()) { finish(); @@ -162,37 +170,46 @@ protected synchronized void processAsyncFetch(List res } void finish() { - ImmutableOpenMap.Builder>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder>> + indicesStoreStatusesBuilder = ImmutableOpenMap.builder(); + java.util.List failureBuilder = new ArrayList<>(); for (Response fetchResponse : fetchResponses) { - ImmutableOpenIntMap> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName()); + ImmutableOpenIntMap> indexStoreStatuses = + indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndexName()); final ImmutableOpenIntMap.Builder> indexShardsBuilder; if (indexStoreStatuses == null) { indexShardsBuilder = ImmutableOpenIntMap.builder(); } else { indexShardsBuilder = 
ImmutableOpenIntMap.builder(indexStoreStatuses); } - java.util.List storeStatuses = indexShardsBuilder.get(fetchResponse.shardId.id()); + java.util.List storeStatuses = indexShardsBuilder + .get(fetchResponse.shardId.id()); if (storeStatuses == null) { storeStatuses = new ArrayList<>(); } for (NodeGatewayStartedShards response : fetchResponse.responses) { if (shardExistsInNode(response)) { - IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode()); - storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.allocationId(), allocationStatus, response.storeException())); + IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus( + fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), response.getNode()); + storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.allocationId(), + allocationStatus, response.storeException())); } } CollectionUtil.timSort(storeStatuses); indexShardsBuilder.put(fetchResponse.shardId.id(), storeStatuses); indicesStoreStatusesBuilder.put(fetchResponse.shardId.getIndexName(), indexShardsBuilder.build()); for (FailedNodeException failure : fetchResponse.failures) { - failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(), fetchResponse.shardId.id(), failure.getCause())); + failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndexName(), + fetchResponse.shardId.id(), failure.getCause())); } } - listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); + listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), + Collections.unmodifiableList(failureBuilder))); } - private 
IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { + private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, + DiscoveryNode node) { for (ShardRouting shardRouting : routingNodes.node(node.getId())) { ShardId shardId = shardRouting.shardId(); if (shardId.id() == shardID && shardId.getIndexName().equals(index)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java index d8480519e5def..a36821a4b656a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexStats.java @@ -68,7 +68,8 @@ public Map getIndexShards() { } indexShards = new HashMap<>(); for (Map.Entry> entry : tmpIndexShards.entrySet()) { - indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[entry.getValue().size()]))); + indexShards.put(entry.getKey(), new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), + entry.getValue().toArray(new ShardStats[entry.getValue().size()]))); } return indexShards; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index 8e7afe3e7e308..525e6f13a908d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -31,7 +31,8 @@ * All the stats to be returned can be cleared using {@link #clear()}, at which point, specific * stats can be enabled. 
*/ -public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder { +public class IndicesStatsRequestBuilder + extends BroadcastOperationRequestBuilder { public IndicesStatsRequestBuilder(ElasticsearchClient client, IndicesStatsAction action) { super(client, action, new IndicesStatsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index d09aa58938450..85af9b2d5b64b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -82,8 +82,11 @@ protected ShardStats readShardResult(StreamInput in) throws IOException { } @Override - protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { - return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); + protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, + List responses, List shardFailures, + ClusterState clusterState) { + return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, + shardFailures); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 5f1119f0f0db1..9826404c598d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java 
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder { +public class DeleteIndexTemplateRequestBuilder + extends MasterNodeOperationRequestBuilder { public DeleteIndexTemplateRequestBuilder(ElasticsearchClient client, DeleteIndexTemplateAction action) { super(client, action, new DeleteIndexTemplateRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 7f9fc03210675..0d17b38d91902 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -37,7 +37,8 @@ /** * Delete index action. 
*/ -public class TransportDeleteIndexTemplateAction extends TransportMasterNodeAction { +public class TransportDeleteIndexTemplateAction + extends TransportMasterNodeAction { private final MetaDataIndexTemplateService indexTemplateService; @@ -45,7 +46,8 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio public TransportDeleteIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexTemplateRequest::new); + super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, + indexNameExpressionResolver, DeleteIndexTemplateRequest::new); this.indexTemplateService = indexTemplateService; } @@ -66,18 +68,23 @@ protected ClusterBlockException checkBlock(DeleteIndexTemplateRequest request, C } @Override - protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener) { - indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() { - @Override - public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { - listener.onResponse(new AcknowledgedResponse(response.acknowledged())); - } + protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, + final ActionListener listener) { + indexTemplateService.removeTemplates( + new MetaDataIndexTemplateService + .RemoveRequest(request.name()) + .masterTimeout(request.masterNodeTimeout()), + new MetaDataIndexTemplateService.RemoveListener() { + 
@Override + public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { + listener.onResponse(new AcknowledgedResponse(response.acknowledged())); + } - @Override - public void onFailure(Exception e) { - logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); - listener.onFailure(e); - } - }); + @Override + public void onFailure(Exception e) { + logger.debug(() -> new ParameterizedMessage("failed to delete templates [{}]", request.name()), e); + listener.onFailure(e); + } + }); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index 5068f11a0d201..58d3587518c09 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -21,7 +21,10 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder< + GetIndexTemplatesRequest, + GetIndexTemplatesResponse, + GetIndexTemplatesRequestBuilder> { public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTemplatesAction action) { super(client, action, new GetIndexTemplatesRequest()); @@ -31,3 +34,4 @@ public GetIndexTemplatesRequestBuilder(ElasticsearchClient client, GetIndexTempl super(client, action, new GetIndexTemplatesRequest(names)); } } + diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 82c8bcec9b020..e66969854aa1f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -42,8 +42,10 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAct @Inject public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexTemplatesRequest::new, indexNameExpressionResolver); + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, + GetIndexTemplatesRequest::new, indexNameExpressionResolver); } @Override @@ -62,7 +64,8 @@ protected GetIndexTemplatesResponse newResponse() { } @Override - protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener listener) { + protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, + ActionListener listener) { List results; // If we did not ask for a specific name, then we return all templates diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 34eccbf9d8a40..ae3a799453d67 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -47,8 +47,10 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction

listener) { + protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, + final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java index cae0fd6bfa6fe..68054595701ad 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -45,7 +45,8 @@ public class IndexUpgradeStatus implements Iterable { } indexShards = new HashMap<>(); for (Map.Entry> entry : tmpIndexShards.entrySet()) { - indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()]))); + indexShards.put(entry.getKey(), new IndexShardUpgradeStatus(entry.getValue().get(0).getShardRouting().shardId(), + entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()]))); } } @@ -91,4 +92,4 @@ public long getToUpgradeBytesAncient() { } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index 603b25f6ab414..2958538f83bc9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -43,13 +43,15 @@ import java.io.IOException; import java.util.List; -public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction { +public class 
TransportUpgradeStatusAction + extends TransportBroadcastByNodeAction { private final IndicesService indicesService; @Inject public TransportUpgradeStatusAction(Settings settings, ClusterService clusterService, TransportService transportService, - IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { + IndicesService indicesService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, UpgradeStatusAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpgradeStatusRequest::new, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; @@ -79,8 +81,11 @@ protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException } @Override - protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List responses, List shardFailures, ClusterState clusterState) { - return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures); + protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, + List responses, + List shardFailures, ClusterState clusterState) { + return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, + failedShards, shardFailures); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java index cee5bdcabe59d..e359b191ffdba 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/get/UpgradeStatusRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class UpgradeStatusRequestBuilder extends BroadcastOperationRequestBuilder { +public class UpgradeStatusRequestBuilder + extends BroadcastOperationRequestBuilder { public UpgradeStatusRequestBuilder(ElasticsearchClient client, UpgradeStatusAction action) { super(client, action, new UpgradeStatusRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index 918b8a06056d2..c44ad9e70bae7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -66,13 +66,16 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction shardUpgradeResults, List shardFailures, ClusterState clusterState) { + protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, + List shardUpgradeResults, + List shardFailures, ClusterState clusterState) { Map successfulPrimaryShards = new HashMap<>(); Map> versions = new HashMap<>(); for (ShardUpgradeResult result : shardUpgradeResults) { @@ -111,8 +114,8 @@ protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, i if (primaryCount == metaData.index(index).getNumberOfShards()) { updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString())); } else { - logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - expected[{}], received[{}]", index, - 
expectedPrimaryCount, primaryCount == null ? 0 : primaryCount); + logger.warn("Not updating settings for the index [{}] because upgraded of some primary shards failed - " + + "expected[{}], received[{}]", index, expectedPrimaryCount, primaryCount == null ? 0 : primaryCount); } } @@ -152,7 +155,8 @@ protected ShardsIterator shards(ClusterState clusterState, UpgradeRequest reques return iterator; } // If some primary shards are not available the request should fail. - throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards " + indicesWithMissingPrimaries); + throw new PrimaryMissingActionException("Cannot upgrade indices because the following indices are missing primary shards " + + indicesWithMissingPrimaries); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 7c4aa406b2101..ff68a3e88a469 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -41,9 +41,11 @@ public class TransportUpgradeSettingsAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final UpgradeSettingsRequest request, final ClusterState state, + final ActionListener listener) { UpgradeSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpgradeSettingsClusterStateUpdateRequest() .ackTimeout(request.timeout()) .versions(request.versions()) @@ -78,7 +81,8 @@ public void onResponse(ClusterStateUpdateResponse response) { @Override public void onFailure(Exception t) { - logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", request.versions().keySet()), t); + 
logger.debug(() -> new ParameterizedMessage("failed to upgrade minimum compatibility version settings on indices [{}]", + request.versions().keySet()), t); listener.onFailure(t); } }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java index e3a48066bbfe0..853077a67a752 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/UpgradeSettingsRequestBuilder.java @@ -30,7 +30,8 @@ /** * Builder for an update index settings request */ -public class UpgradeSettingsRequestBuilder extends AcknowledgedRequestBuilder { +public class UpgradeSettingsRequestBuilder + extends AcknowledgedRequestBuilder { public UpgradeSettingsRequestBuilder(ElasticsearchClient client, UpgradeSettingsAction action) { super(client, action, new UpgradeSettingsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 2b3c8a7bbcc33..5c13c1cde28db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -59,7 +59,11 @@ import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.LongSupplier; -public class TransportValidateQueryAction extends TransportBroadcastAction { +public class TransportValidateQueryAction extends TransportBroadcastAction< + ValidateQueryRequest, + ValidateQueryResponse, + ShardValidateQueryRequest, + ShardValidateQueryResponse> { private final SearchService 
searchService; @@ -146,7 +150,8 @@ protected ClusterBlockException checkRequestBlock(ClusterState state, ValidateQu } @Override - protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { + protected ValidateQueryResponse newResponse(ValidateQueryRequest request, AtomicReferenceArray shardsResponses, + ClusterState clusterState) { int successfulShards = 0; int failedShards = 0; boolean valid = true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java index bd8067e05cb9f..bf34f8b27b4fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.index.query.QueryBuilder; -public class ValidateQueryRequestBuilder extends BroadcastOperationRequestBuilder { +public class ValidateQueryRequestBuilder + extends BroadcastOperationRequestBuilder { public ValidateQueryRequestBuilder(ElasticsearchClient client, ValidateQueryAction action) { super(client, action, new ValidateQueryRequest()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index c0404a47ab237..b0c2e34c30620 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -148,7 +148,8 @@ public void testNoIndexAnalyzers() throws IOException { request.text("the qu1ck brown fox"); 
request.tokenizer("standard"); request.addTokenFilter("mock"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, + maxTokenCount); tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("qu1ck", tokens.get(0).getTerm()); @@ -160,7 +161,8 @@ public void testNoIndexAnalyzers() throws IOException { request.text("the qu1ck brown fox"); request.tokenizer("standard"); request.addCharFilter("append_foo"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, + maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -174,7 +176,8 @@ public void testNoIndexAnalyzers() throws IOException { request.tokenizer("standard"); request.addCharFilter("append"); request.text("the qu1ck brown fox"); - analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? indexAnalyzers : null, registry, environment, maxTokenCount); + analyze = TransportAnalyzeAction.analyze(request, "text", null, randomBoolean() ? 
indexAnalyzers : null, registry, environment, + maxTokenCount); tokens = analyze.getTokens(); assertEquals(4, tokens.size()); assertEquals("the", tokens.get(0).getTerm()); @@ -219,7 +222,8 @@ public void testWithIndexAnalyzers() throws IOException { AnalyzeRequest request = new AnalyzeRequest(); request.text("the quick brown fox"); request.analyzer("custom_analyzer"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, + maxTokenCount); List tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("quick", tokens.get(0).getTerm()); @@ -333,7 +337,8 @@ public void testNonPreBuildTokenFilter() throws IOException { request.tokenizer("standard"); request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters() request.text("the quick brown fox"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, + maxTokenCount); List tokens = analyze.getTokens(); assertEquals(3, tokens.size()); assertEquals("quick", tokens.get(0).getTerm()); @@ -345,7 +350,8 @@ public void testNormalizerWithIndex() throws IOException { AnalyzeRequest request = new AnalyzeRequest("index"); request.normalizer("my_normalizer"); request.text("ABc"); - AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount); + AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, + maxTokenCount); List tokens = analyze.getTokens(); assertEquals(1, tokens.size()); diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index ee1f4dd24e2f4..c4454adebb8e1 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -45,7 +45,8 @@ public void testClearIndicesCacheWithBlocks() { for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", blockSetting); - ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet(); + ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test") + .setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { @@ -56,7 +57,8 @@ public void testClearIndicesCacheWithBlocks() { for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); - assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true)); + assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true) + .setFieldDataCache(true)); } finally { disableIndexBlock("test", blockSetting); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index 
7040c92ec1d27..f6ca1c4f742a0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -107,7 +107,8 @@ public void testResponseStreaming() throws IOException { assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId())); assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); - for (Map.Entry shardEntry : originalShardResult.failedShards().entrySet()) { + for (Map.Entry shardEntry + : originalShardResult.failedShards().entrySet()) { SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); assertNotNull(readShardResponse); SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); @@ -115,8 +116,10 @@ public void testResponseStreaming() throws IOException { assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); } assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); - for (Map.Entry shardEntry : originalShardResult.shardResponses().entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey()); + for (Map.Entry shardEntry + : originalShardResult.shardResponses().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses() + .get(shardEntry.getKey()); assertNotNull(readShardResponse); SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); @@ -157,8 +160,8 @@ protected TestPlan createTestPlan() { } else { Map shardResponses = new HashMap<>(); for 
(int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED); + final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, + null, copy == 0, ShardRoutingState.STARTED); if (randomInt(5) < 2) { // shard copy failure failed++; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index 48914fca13133..91479e4bfe192 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -197,7 +197,8 @@ public void testGetIndexWithBlocks() { try { enableIndexBlock("idx", SETTING_BLOCKS_METADATA); - assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK); + assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), + INDEX_METADATA_BLOCK); } finally { disableIndexBlock("idx", SETTING_BLOCKS_METADATA); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 103e7db07a9fb..70d45d1db3ac6 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -117,7 +117,8 @@ public void testBasic() throws Exception { assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); for (IntObjectCursor> storesStatus : shardStoresStatuses) { assertThat("must report for one store", 
storesStatus.value.size(), equalTo(1)); - assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)); + assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), + equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)); } logger.info("--> enable allocation"); enableAllocation(index); @@ -136,8 +137,10 @@ public void testIndices() throws Exception { indexRandomData(index1); indexRandomData(index2); ensureGreen(); - IndicesShardStoresResponse response = client().admin().indices().shardStores(Requests.indicesShardStoresRequest().shardStatuses("all")).get(); - ImmutableOpenMap>> shardStatuses = response.getStoreStatuses(); + IndicesShardStoresResponse response = client().admin().indices() + .shardStores(Requests.indicesShardStoresRequest().shardStatuses("all")).get(); + ImmutableOpenMap>> + shardStatuses = response.getStoreStatuses(); assertThat(shardStatuses.containsKey(index1), equalTo(true)); assertThat(shardStatuses.containsKey(index2), equalTo(true)); assertThat(shardStatuses.get(index1).size(), equalTo(2)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index 661f47b38a8a4..2a2f32e6894dd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -46,16 +46,21 @@ public class IndicesShardStoreResponseTests extends ESTestCase { public void testBasicSerialization() throws Exception { - ImmutableOpenMap.Builder>> indexStoreStatuses = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder>> + indexStoreStatuses = ImmutableOpenMap.builder(); + List failures = new 
ArrayList<>(); ImmutableOpenIntMap.Builder> storeStatuses = ImmutableOpenIntMap.builder(); DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); List storeStatusList = new ArrayList<>(); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted"))); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted"))); storeStatuses.put(0, storeStatusList); storeStatuses.put(1, storeStatusList); ImmutableOpenIntMap> storesMap = storeStatuses.build(); @@ -64,7 +69,8 @@ public void testBasicSerialization() throws Exception { failures.add(new IndicesShardStoresResponse.Failure("node1", "test", 3, new NodeDisconnectedException(node1, ""))); - IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), Collections.unmodifiableList(failures)); + IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), + 
Collections.unmodifiableList(failures)); XContentBuilder contentBuilder = XContentFactory.jsonBuilder(); contentBuilder.startObject(); storesResponse.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS); @@ -117,14 +123,22 @@ public void testBasicSerialization() throws Exception { public void testStoreStatusOrdering() throws Exception { DiscoveryNode node1 = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); List orderedStoreStatuses = new ArrayList<>(); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + 
IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, UUIDs.randomBase64UUID(), + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, null, + IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); Collections.shuffle(storeStatuses, random()); From 1b879ea8ac3a4feeb88b4be6cbe285fa16253fa4 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Fri, 26 Oct 2018 16:26:45 +0200 Subject: [PATCH 3/9] Refactor children aggregator into a generic ParentJoinAggregator (#34845) This commit adds a new ParentJoinAggregator that implements a join using global ordinals in a way that can be reused by the `children` and the upcoming `parent` aggregation. This new aggregator is a refactor of the existing ParentToChildrenAggregator with two main changes: * It uses a dense bit array instead of a long array when the aggregation does not have any parent. 
* It uses a single aggregator per bucket if it is nested under another aggregation. For the latter case we use a `MultiBucketAggregatorWrapper` in the factory in order to ensure that each instance of the aggregator handles a single bucket. This is more inlined with the strategy we use for other aggregations like `terms` aggregation for instance since the number of buckets to handle should be low (thanks to the breadth_first strategy). This change is also required for #34210 which adds the `parent` aggregation in the parent-join module. Relates #34508 --- .../ChildrenAggregatorFactory.java | 38 ++-- .../aggregations/ParentJoinAggregator.java | 173 ++++++++++++++++++ .../ParentToChildrenAggregator.java | 143 +-------------- .../composite => common/util}/BitArray.java | 6 +- .../bucket/composite/DoubleValuesSource.java | 3 +- .../bucket/composite/LongValuesSource.java | 3 +- .../util}/BitArrayTests.java | 5 +- 7 files changed, 209 insertions(+), 162 deletions(-) create mode 100644 modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java rename server/src/main/java/org/elasticsearch/{search/aggregations/bucket/composite => common/util}/BitArray.java (92%) rename server/src/test/java/org/elasticsearch/{search/aggregations/bucket/composite => common/util}/BitArrayTests.java (90%) diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java index 9c38fa2eae6b9..1f466f1020d18 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregatorFactory.java @@ -35,39 +35,49 @@ import java.util.List; import java.util.Map; -public class ChildrenAggregatorFactory - extends ValuesSourceAggregatorFactory { +public class ChildrenAggregatorFactory 
extends ValuesSourceAggregatorFactory { private final Query parentFilter; private final Query childFilter; - public ChildrenAggregatorFactory(String name, ValuesSourceConfig config, - Query childFilter, Query parentFilter, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + public ChildrenAggregatorFactory(String name, + ValuesSourceConfig config, + Query childFilter, + Query parentFilter, + SearchContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); + this.childFilter = childFilter; this.parentFilter = parentFilter; } @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator createUnmapped(Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { - @Override public InternalAggregation buildEmptyAggregation() { return new InternalChildren(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); } - }; } @Override - protected Aggregator doCreateInternal(WithOrdinals valuesSource, Aggregator parent, - boolean collectsFromSingleBucket, List pipelineAggregators, Map metaData) - throws IOException { + protected Aggregator doCreateInternal(WithOrdinals valuesSource, + Aggregator parent, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + long maxOrd = valuesSource.globalMaxOrd(context.searcher()); - return new ParentToChildrenAggregator(name, factories, context, parent, childFilter, - parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + if (collectsFromSingleBucket) { + return new ParentToChildrenAggregator(name, factories, context, parent, childFilter, 
+ parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + } else { + return asMultiBucketAggregator(this, context, parent); + } } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java new file mode 100644 index 0000000000000..46e358319a28a --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentJoinAggregator.java @@ -0,0 +1,173 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * An aggregator that joins documents based on global ordinals. + * Global ordinals that match the main query and the inFilter query are replayed + * with documents matching the outFilter query. + */ +public abstract class ParentJoinAggregator extends BucketsAggregator implements SingleBucketAggregator { + private final Weight inFilter; + private final Weight outFilter; + private final ValuesSource.Bytes.WithOrdinals valuesSource; + private final boolean singleAggregator; + + /** + * If this aggregator is nested under another aggregator we allocate a long hash per bucket. 
+ */ + private final LongHash ordsHash; + /** + * Otherwise we use a dense bit array to record the global ordinals. + */ + private final BitArray ordsBit; + + public ParentJoinAggregator(String name, + AggregatorFactories factories, + SearchContext context, + Aggregator parent, + Query inFilter, + Query outFilter, + ValuesSource.Bytes.WithOrdinals valuesSource, + long maxOrd, + List pipelineAggregators, + Map metaData) throws IOException { + super(name, factories, context, parent, pipelineAggregators, metaData); + + if (maxOrd > Integer.MAX_VALUE) { + throw new IllegalStateException("the number of parent [" + maxOrd + "] + is greater than the allowed limit " + + "for this aggregation: " + Integer.MAX_VALUE); + } + + // these two filters are cached in the parser + this.inFilter = context.searcher().createWeight(context.searcher().rewrite(inFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.outFilter = context.searcher().createWeight(context.searcher().rewrite(outFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); + this.valuesSource = valuesSource; + this.singleAggregator = parent == null; + this.ordsBit = singleAggregator ? new BitArray((int) maxOrd, context.bigArrays()) : null; + this.ordsHash = singleAggregator ? null : new LongHash(1, context.bigArrays()); + } + + private void addGlobalOrdinal(int globalOrdinal) { + if (singleAggregator) { + ordsBit.set(globalOrdinal); + } else { + ordsHash.add(globalOrdinal); + } + } + + private boolean existsGlobalOrdinal(int globalOrdinal) { + return singleAggregator ? 
ordsBit.get(globalOrdinal): ordsHash.find(globalOrdinal) >= 0; + } + + @Override + public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, + final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); + final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), inFilter.scorerSupplier(ctx)); + return new LeafBucketCollector() { + @Override + public void collect(int docId, long bucket) throws IOException { + assert bucket == 0; + if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) { + int globalOrdinal = (int) globalOrdinals.nextOrd(); + assert globalOrdinal != -1 && globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; + addGlobalOrdinal(globalOrdinal); + } + } + }; + } + + @Override + protected final void doPostCollection() throws IOException { + IndexReader indexReader = context().searcher().getIndexReader(); + for (LeafReaderContext ctx : indexReader.leaves()) { + Scorer childDocsScorer = outFilter.scorer(ctx); + if (childDocsScorer == null) { + continue; + } + DocIdSetIterator childDocsIter = childDocsScorer.iterator(); + + final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); + + final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); + // Set the scorer, since we now replay only the child docIds + sub.setScorer(new Scorable() { + @Override + public float score() { + return 1f; + } + + @Override + public int docID() { + return childDocsIter.docID(); + } + }); + + final Bits liveDocs = ctx.reader().getLiveDocs(); + for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } + if (globalOrdinals.advanceExact(docId)) { + int globalOrdinal = (int) globalOrdinals.nextOrd(); + 
assert globalOrdinal != -1 && globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; + if (existsGlobalOrdinal(globalOrdinal)) { + collectBucket(sub, docId, 0); + } + } + } + } + } + + @Override + protected void doClose() { + Releasables.close(ordsBit, ordsHash); + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java index 064d1d1e5977c..3990e8697ef63 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java @@ -18,73 +18,28 @@ */ package org.elasticsearch.join.aggregations; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.Bits; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.util.LongArray; -import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; -import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; 
import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Map; -// The RecordingPerReaderBucketCollector assumes per segment recording which isn't the case for this -// aggregation, for this reason that collector can't be used -public class ParentToChildrenAggregator extends BucketsAggregator implements SingleBucketAggregator { +public class ParentToChildrenAggregator extends ParentJoinAggregator { static final ParseField TYPE_FIELD = new ParseField("type"); - private final Weight childFilter; - private final Weight parentFilter; - private final ValuesSource.Bytes.WithOrdinals valuesSource; - - // Maybe use PagedGrowableWriter? This will be less wasteful than LongArray, - // but then we don't have the reuse feature of BigArrays. - // Also if we know the highest possible value that a parent agg will create - // then we store multiple values into one slot - private final LongArray parentOrdToBuckets; - - // Only pay the extra storage price if the a parentOrd has multiple buckets - // Most of the times a parent doesn't have multiple buckets, since there is - // only one document per parent ord, - // only in the case of terms agg if a parent doc has multiple terms per - // field this is needed: - private final LongObjectPagedHashMap parentOrdToOtherBuckets; - private boolean multipleBucketsPerParentOrd = false; - public ParentToChildrenAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent, Query childFilter, Query parentFilter, ValuesSource.Bytes.WithOrdinals valuesSource, - long maxOrd, List pipelineAggregators, Map metaData) - throws IOException { - super(name, factories, context, parent, pipelineAggregators, metaData); - // these two filters are cached in the parser - this.childFilter = context.searcher().createWeight(context.searcher().rewrite(childFilter), 
ScoreMode.COMPLETE_NO_SCORES, 1f); - this.parentFilter = context.searcher().createWeight(context.searcher().rewrite(parentFilter), ScoreMode.COMPLETE_NO_SCORES, 1f); - this.parentOrdToBuckets = context.bigArrays().newLongArray(maxOrd, false); - this.parentOrdToBuckets.fill(0, maxOrd, -1); - this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(context.bigArrays()); - this.valuesSource = valuesSource; + long maxOrd, List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, context, parent, parentFilter, childFilter, valuesSource, maxOrd, pipelineAggregators, metaData); } @Override @@ -99,96 +54,4 @@ public InternalAggregation buildEmptyAggregation() { metaData()); } - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, - final LeafBucketCollector sub) throws IOException { - if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; - } - final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); - final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentFilter.scorerSupplier(ctx)); - return new LeafBucketCollector() { - - @Override - public void collect(int docId, long bucket) throws IOException { - if (parentDocs.get(docId) && globalOrdinals.advanceExact(docId)) { - long globalOrdinal = globalOrdinals.nextOrd(); - assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; - if (globalOrdinal != -1) { - if (parentOrdToBuckets.get(globalOrdinal) == -1) { - parentOrdToBuckets.set(globalOrdinal, bucket); - } else { - long[] bucketOrds = parentOrdToOtherBuckets.get(globalOrdinal); - if (bucketOrds != null) { - bucketOrds = Arrays.copyOf(bucketOrds, bucketOrds.length + 1); - bucketOrds[bucketOrds.length - 1] = bucket; - parentOrdToOtherBuckets.put(globalOrdinal, bucketOrds); - } else { - parentOrdToOtherBuckets.put(globalOrdinal, new long[] { bucket }); - } - multipleBucketsPerParentOrd = true; - } - } - } - } - }; - } - - @Override - 
protected void doPostCollection() throws IOException { - IndexReader indexReader = context().searcher().getIndexReader(); - for (LeafReaderContext ctx : indexReader.leaves()) { - Scorer childDocsScorer = childFilter.scorer(ctx); - if (childDocsScorer == null) { - continue; - } - DocIdSetIterator childDocsIter = childDocsScorer.iterator(); - - final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); - - final SortedSetDocValues globalOrdinals = valuesSource.globalOrdinalsValues(ctx); - // Set the scorer, since we now replay only the child docIds - sub.setScorer(new Scorable() { - @Override - public float score() { - return 1f; - } - - @Override - public int docID() { - return childDocsIter.docID(); - } - }); - - final Bits liveDocs = ctx.reader().getLiveDocs(); - for (int docId = childDocsIter - .nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter - .nextDoc()) { - if (liveDocs != null && liveDocs.get(docId) == false) { - continue; - } - if (globalOrdinals.advanceExact(docId)) { - long globalOrdinal = globalOrdinals.nextOrd(); - assert globalOrdinals.nextOrd() == SortedSetDocValues.NO_MORE_ORDS; - long bucketOrd = parentOrdToBuckets.get(globalOrdinal); - if (bucketOrd != -1) { - collectBucket(sub, docId, bucketOrd); - if (multipleBucketsPerParentOrd) { - long[] otherBucketOrds = parentOrdToOtherBuckets.get(globalOrdinal); - if (otherBucketOrds != null) { - for (long otherBucketOrd : otherBucketOrds) { - collectBucket(sub, docId, otherBucketOrd); - } - } - } - } - } - } - } - } - - @Override - protected void doClose() { - Releasables.close(parentOrdToBuckets, parentOrdToOtherBuckets); - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java similarity index 92% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java rename to 
server/src/main/java/org/elasticsearch/common/util/BitArray.java index 6b35d7d2e2e0a..54fa4a669de29 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.search.aggregations.bucket.composite; +package org.elasticsearch.common.util; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; @@ -30,11 +30,11 @@ * The underlying long array grows lazily based on the biggest index * that needs to be set. */ -final class BitArray implements Releasable { +public final class BitArray implements Releasable { private final BigArrays bigArrays; private LongArray bits; - BitArray(BigArrays bigArrays, int initialSize) { + public BitArray(int initialSize, BigArrays bigArrays) { this.bigArrays = bigArrays; this.bits = bigArrays.newLongArray(initialSize, true); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index d243b0e75924e..633d919f140cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -48,7 +49,7 @@ class DoubleValuesSource extends SingleDimensionValuesSource { DocValueFormat format, boolean missingBucket, int size, int reverseMul) { 
super(bigArrays, format, fieldType, missingBucket, size, reverseMul); this.docValuesFunc = docValuesFunc; - this.bits = missingBucket ? new BitArray(bigArrays, 100) : null; + this.bits = missingBucket ? new BitArray(100, bigArrays) : null; this.values = bigArrays.newDoubleArray(Math.min(size, 100), false); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 6d5e9f7d6e251..e5ecbd6d00e20 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -61,7 +62,7 @@ class LongValuesSource extends SingleDimensionValuesSource { this.bigArrays = bigArrays; this.docValuesFunc = docValuesFunc; this.rounding = rounding; - this.bits = missingBucket ? new BitArray(bigArrays, Math.min(size, 100)) : null; + this.bits = missingBucket ? 
new BitArray(Math.min(size, 100), bigArrays) : null; this.values = bigArrays.newLongArray(Math.min(size, 100), false); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java similarity index 90% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java rename to server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java index 1806080260f28..518bbc08f4cf9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/BitArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BitArrayTests.java @@ -17,9 +17,8 @@ * under the License. */ -package org.elasticsearch.search.aggregations.bucket.composite; +package org.elasticsearch.common.util; -import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -28,7 +27,7 @@ public class BitArrayTests extends ESTestCase { public void testRandom() { - try (BitArray bitArray = new BitArray(BigArrays.NON_RECYCLING_INSTANCE, 1)) { + try (BitArray bitArray = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) { int numBits = randomIntBetween(1000, 10000); for (int step = 0; step < 3; step++) { boolean[] bits = new boolean[numBits]; From a39a67cd38981c51c974ea6b1f3291bd7970fe9a Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Fri, 26 Oct 2018 15:34:48 +0100 Subject: [PATCH 4/9] [ML] Extract common native process base class (#34856) We currently have two different native processes: autodetect & normalizer. There are plans for introducing a new process. All these share many things in common. This commit refactors the processes to extend an `AbstractNativeProcess` class that encapsulates those commonalities with the purpose of reusing the code for new processes in the future. 
--- .../xpack/core/ml/job/config/Detector.java | 2 +- .../writer/RecordWriter.java | 2 +- .../ml/job/config/AnalysisConfigTests.java | 2 +- .../core/ml/job/config/DetectorTests.java | 2 +- .../xpack/ml/MachineLearning.java | 9 +- .../xpack/ml/MachineLearningFeatureSet.java | 4 +- .../xpack/ml/MlLifeCycleService.java | 4 +- .../process/autodetect/AutodetectBuilder.java | 4 +- .../autodetect/AutodetectCommunicator.java | 2 +- .../process/autodetect/AutodetectProcess.java | 75 +---- .../autodetect/AutodetectProcessManager.java | 2 +- .../BlackHoleAutodetectProcess.java | 2 +- .../autodetect/NativeAutodetectProcess.java | 240 ++-------------- .../NativeAutodetectProcessFactory.java | 8 +- ...sor.java => AutodetectStateProcessor.java} | 28 +- .../writer/AbstractDataToProcessWriter.java | 1 + .../writer/ControlMsgToProcessWriter.java | 3 +- .../autodetect/writer/CsvRecordWriter.java | 2 +- .../MultiplyingNormalizerProcess.java | 45 ++- .../normalizer/NativeNormalizerProcess.java | 91 +----- .../NativeNormalizerProcessFactory.java | 31 +- .../process/normalizer/NormalizerProcess.java | 28 +- .../output/NormalizerResultHandler.java | 7 +- .../ml/process/AbstractNativeProcess.java | 265 ++++++++++++++++++ .../{job => }/process/NativeController.java | 4 +- .../process/NativeControllerHolder.java | 2 +- .../xpack/ml/process/NativeProcess.java | 85 ++++++ .../process/NativeStorageProvider.java | 2 +- .../ml/{job => }/process/ProcessPipes.java | 2 +- .../xpack/ml/process/StateProcessor.java | 14 + .../process/logging/CppLogMessage.java | 2 +- .../process/logging/CppLogMessageHandler.java | 2 +- .../writer/LengthEncodedWriter.java | 4 +- .../autodetect/AutodetectBuilderTests.java | 4 +- .../NativeAutodetectProcessTests.java | 12 +- ...ava => AutodetectStateProcessorTests.java} | 25 +- .../AbstractDataToProcessWriterTests.java | 1 + .../ControlMsgToProcessWriterTests.java | 1 + .../process/normalizer/NormalizerTests.java | 7 +- .../output/NormalizerResultHandlerTests.java | 3 +- 
.../process/NativeControllerTests.java | 2 +- .../process/NativeStorageProviderTests.java | 9 +- .../{job => }/process/ProcessPipesTests.java | 2 +- .../logging/CppLogMessageHandlerTests.java | 4 +- .../process/logging/CppLogMessageTests.java | 4 +- .../writer/LengthEncodedWriterTests.java | 2 +- 46 files changed, 560 insertions(+), 492 deletions(-) rename x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/{job/process/autodetect => process}/writer/RecordWriter.java (93%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/{StateProcessor.java => AutodetectStateProcessor.java} (82%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeController.java (98%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeControllerHolder.java (97%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/NativeStorageProvider.java (98%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/ProcessPipes.java (99%) create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessage.java (99%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageHandler.java (99%) rename x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/{job/process/autodetect => process}/writer/LengthEncodedWriter.java (95%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/{StateProcessorTests.java => AutodetectStateProcessorTests.java} (88%) rename 
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/NativeControllerTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/NativeStorageProviderTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/ProcessPipesTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageHandlerTests.java (99%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job => }/process/logging/CppLogMessageTests.java (98%) rename x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/{job/process/autodetect => process}/writer/LengthEncodedWriterTests.java (99%) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index b6275c6e0579a..d53e4cb74126d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java similarity index 93% rename from 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java index 61b904246d50f..b66fd948a5a83 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/writer/RecordWriter.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/process/writer/RecordWriter.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.core.ml.job.process.autodetect.writer; +package org.elasticsearch.xpack.core.ml.process.writer; import java.io.IOException; import java.util.List; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java index d691124a90a43..8843a336bde3d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java index 2f7eab0e97c70..fe546a371816d 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/DetectorTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.util.ArrayList; import java.util.Arrays; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index cdd3af133f6dc..2e90e678351c4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -168,8 +168,6 @@ import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectBuilder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessFactory; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -180,6 +178,8 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import 
org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; import org.elasticsearch.xpack.ml.rest.RestFindFileStructureAction; import org.elasticsearch.xpack.ml.rest.RestMlInfoAction; @@ -386,7 +386,7 @@ public Collection createComponents(Client client, ClusterService cluster nativeController, client, clusterService); - normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, settings, nativeController); + normalizerProcessFactory = new NativeNormalizerProcessFactory(environment, nativeController); } catch (IOException e) { // This also should not happen in production, as the MachineLearningFeatureSet should have // hit the same error first and brought down the node with a friendlier error message @@ -396,8 +396,7 @@ public Collection createComponents(Client client, ClusterService cluster autodetectProcessFactory = (job, autodetectParams, executorService, onProcessCrash) -> new BlackHoleAutodetectProcess(job.getId()); // factor of 1.0 makes renormalization a no-op - normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> - new MultiplyingNormalizerProcess(settings, 1.0); + normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> new MultiplyingNormalizerProcess(1.0); } NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory, threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index b5ff2e2a7de6e..d9b8ea7cd4226 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -31,8 +31,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.job.config.Job; import 
org.elasticsearch.xpack.core.ml.job.config.JobState; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; import org.elasticsearch.xpack.core.ml.stats.ForecastStats; import org.elasticsearch.xpack.core.ml.stats.StatsAccumulator; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index b0a0eebc49df3..efc0517900ec4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -10,8 +10,8 @@ import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.ml.datafeed.DatafeedManager; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java index 4942200606dba..dbc565fc50c12 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java @@ -19,9 +19,9 @@ import 
org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; -import org.elasticsearch.xpack.ml.job.process.NativeController; +import org.elasticsearch.xpack.ml.process.NativeController; import org.elasticsearch.xpack.ml.job.process.ProcessBuilderUtils; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.AnalysisLimitsWriter; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.FieldConfigWriter; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.ModelPlotConfigWriter; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index 0206bd88245b3..3f93d46b72737 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -264,7 +264,7 @@ public void forecastJob(ForecastParams params, BiConsumer handl public void persistJob(BiConsumer handler) { submitOperation(() -> { - autodetectProcess.persistJob(); + autodetectProcess.persistState(); return null; }, handler); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java index 21be815d561a8..dab0c5aa49872 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcess.java @@ -10,23 +10,22 @@ import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; +import org.elasticsearch.xpack.ml.process.NativeProcess; -import java.io.Closeable; import java.io.IOException; -import java.time.ZonedDateTime; import java.util.Iterator; import java.util.List; /** * Interface representing the native C++ autodetect process */ -public interface AutodetectProcess extends Closeable { +public interface AutodetectProcess extends NativeProcess { /** * Restore state from the given {@link ModelSnapshot} @@ -35,22 +34,6 @@ public interface AutodetectProcess extends Closeable { */ void restoreState(StateStreamer stateStreamer, ModelSnapshot modelSnapshot); - /** - * Is the process ready to receive data? - * @return {@code true} if the process is ready to receive data - */ - boolean isReady(); - - /** - * Write the record to autodetect. The record parameter should not be encoded - * (i.e. length encoded) the implementation will appy the corrrect encoding. 
- * - * @param record Plain array of strings, implementors of this class should - * encode the record appropriately - * @throws IOException If the write failed - */ - void writeRecord(String[] record) throws IOException; - /** * Write the reset buckets control message * @@ -115,60 +98,8 @@ void writeUpdateDetectorRulesMessage(int detectorIndex, List rule */ void forecastJob(ForecastParams params) throws IOException; - /** - * Ask the job to start persisting model state in the background - * @throws IOException If writing the request fails - */ - void persistJob() throws IOException; - - /** - * Flush the output data stream - */ - void flushStream() throws IOException; - - /** - * Kill the process. Do not wait for it to stop gracefully. - */ - void kill() throws IOException; - /** * @return stream of autodetect results. */ Iterator readAutodetectResults(); - - /** - * The time the process was started - * @return Process start time - */ - ZonedDateTime getProcessStartTime(); - - /** - * Returns true if the process still running. - * Methods such as {@link #flushJob(FlushJobParams)} are essentially - * asynchronous the command will be continue to execute in the process after - * the call has returned. This method tests whether something catastrophic - * occurred in the process during its execution. - * @return True if the process is still running - */ - boolean isProcessAlive(); - - /** - * Check whether autodetect terminated given maximum 45ms for termination - * - * Processing errors are highly likely caused by autodetect being unexpectedly - * terminated. - * - * Workaround: As we can not easily check if autodetect is alive, we rely on - * the logPipe being ended. As the loghandler runs in another thread which - * might fall behind this one, we give it a grace period of 45ms. - * - * @return false if process has ended for sure, true if it probably still runs - */ - boolean isProcessAliveAfterWaiting(); - - /** - * Read any content in the error output buffer. 
- * @return An error message or empty String if no error. - */ - String readError(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index ec6b67da1dca8..8dbc13038c7f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -50,7 +50,7 @@ import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; -import org.elasticsearch.xpack.ml.job.process.NativeStorageProvider; +import org.elasticsearch.xpack.ml.process.NativeStorageProvider; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java index 8ff54e80785c5..e1b69d78894db 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/BlackHoleAutodetectProcess.java @@ -96,7 +96,7 @@ public String flushJob(FlushJobParams params) throws IOException { } @Override - public void persistJob() { + public void persistState() { } @Override diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java index faae29fd1eb56..112805b2f7414 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcess.java @@ -5,300 +5,116 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; import org.elasticsearch.xpack.core.ml.job.config.DetectionRule; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.persistence.StateStreamer; -import org.elasticsearch.xpack.ml.job.process.NativeControllerHolder; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.ControlMsgToProcessWriter; -import 
org.elasticsearch.xpack.ml.job.process.autodetect.writer.LengthEncodedWriter; -import org.elasticsearch.xpack.ml.job.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.job.results.AutodetectResult; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.AbstractNativeProcess; -import java.io.BufferedOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.nio.file.Files; import java.nio.file.Path; -import java.time.Duration; -import java.time.ZonedDateTime; import java.util.Iterator; import java.util.List; -import java.util.Objects; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; /** * Autodetect process using native code. */ -class NativeAutodetectProcess implements AutodetectProcess { - private static final Logger LOGGER = Loggers.getLogger(NativeAutodetectProcess.class); +class NativeAutodetectProcess extends AbstractNativeProcess implements AutodetectProcess { - private static final Duration WAIT_FOR_KILL_TIMEOUT = Duration.ofMillis(1000); + private static final Logger LOGGER = LogManager.getLogger(NativeAutodetectProcess.class); + + private static final String NAME = "autodetect"; - private final String jobId; - private final CppLogMessageHandler cppLogHandler; - private final OutputStream processInStream; - private final InputStream processOutStream; - private final OutputStream processRestoreStream; - private final LengthEncodedWriter recordWriter; - private final ZonedDateTime startTime; - private final int numberOfFields; - private final List filesToDelete; - private final Runnable onProcessCrash; - private volatile Future logTailFuture; - private volatile Future stateProcessorFuture; - private volatile boolean processCloseInitiated; - private volatile boolean 
processKilled; - private volatile boolean isReady; private final AutodetectResultsParser resultsParser; NativeAutodetectProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream, OutputStream processRestoreStream, int numberOfFields, List filesToDelete, AutodetectResultsParser resultsParser, Runnable onProcessCrash) { - this.jobId = jobId; - cppLogHandler = new CppLogMessageHandler(jobId, logStream); - this.processInStream = new BufferedOutputStream(processInStream); - this.processOutStream = processOutStream; - this.processRestoreStream = processRestoreStream; - this.recordWriter = new LengthEncodedWriter(this.processInStream); - startTime = ZonedDateTime.now(); - this.numberOfFields = numberOfFields; - this.filesToDelete = filesToDelete; + super(jobId, logStream, processInStream, processOutStream, processRestoreStream, numberOfFields, filesToDelete, onProcessCrash); this.resultsParser = resultsParser; - this.onProcessCrash = Objects.requireNonNull(onProcessCrash); } - public void start(ExecutorService executorService, StateProcessor stateProcessor, InputStream persistStream) { - logTailFuture = executorService.submit(() -> { - try (CppLogMessageHandler h = cppLogHandler) { - h.tailStream(); - } catch (IOException e) { - if (processKilled == false) { - LOGGER.error(new ParameterizedMessage("[{}] Error tailing autodetect process logs", jobId), e); - } - } finally { - if (processCloseInitiated == false && processKilled == false) { - // The log message doesn't say "crashed", as the process could have been killed - // by a user or other process (e.g. 
the Linux OOM killer) - - String errors = cppLogHandler.getErrors(); - LOGGER.error("[{}] autodetect process stopped unexpectedly: {}", jobId, errors); - onProcessCrash.run(); - } - } - }); - stateProcessorFuture = executorService.submit(() -> { - try (InputStream in = persistStream) { - stateProcessor.process(jobId, in); - if (processKilled == false) { - LOGGER.info("[{}] State output finished", jobId); - } - } catch (IOException e) { - if (processKilled == false) { - LOGGER.error(new ParameterizedMessage("[{}] Error reading autodetect state output", jobId), e); - } - } - }); + @Override + public String getName() { + return NAME; } @Override public void restoreState(StateStreamer stateStreamer, ModelSnapshot modelSnapshot) { if (modelSnapshot != null) { - try (OutputStream r = processRestoreStream) { - stateStreamer.restoreStateToStream(jobId, modelSnapshot, r); + try (OutputStream r = processRestoreStream()) { + stateStreamer.restoreStateToStream(jobId(), modelSnapshot, r); } catch (Exception e) { // TODO: should we fail to start? 
- if (processKilled == false) { - LOGGER.error("Error restoring model state for job " + jobId, e); + if (isProcessKilled() == false) { + LOGGER.error("Error restoring model state for job " + jobId(), e); } } } - isReady = true; - } - - @Override - public boolean isReady() { - return isReady; - } - - @Override - public void writeRecord(String[] record) throws IOException { - recordWriter.writeRecord(record); + setReady(); } @Override public void writeResetBucketsControlMessage(DataLoadParams params) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeResetBucketsMessage(params); + newMessageWriter().writeResetBucketsMessage(params); } @Override public void writeUpdateModelPlotMessage(ModelPlotConfig modelPlotConfig) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateModelPlotMessage(modelPlotConfig); + newMessageWriter().writeUpdateModelPlotMessage(modelPlotConfig); } @Override public void writeUpdateDetectorRulesMessage(int detectorIndex, List rules) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateDetectorRulesMessage(detectorIndex, rules); + newMessageWriter().writeUpdateDetectorRulesMessage(detectorIndex, rules); } @Override public void writeUpdateFiltersMessage(List filters) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateFiltersMessage(filters); + newMessageWriter().writeUpdateFiltersMessage(filters); } @Override public void writeUpdateScheduledEventsMessage(List events, TimeValue bucketSpan) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeUpdateScheduledEventsMessage(events, bucketSpan); + 
newMessageWriter().writeUpdateScheduledEventsMessage(events, bucketSpan); } @Override public String flushJob(FlushJobParams params) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); + ControlMsgToProcessWriter writer = newMessageWriter(); writer.writeFlushControlMessage(params); return writer.writeFlushMessage(); } @Override public void forecastJob(ForecastParams params) throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeForecastMessage(params); - } - - @Override - public void persistJob() throws IOException { - ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfFields); - writer.writeStartBackgroundPersistMessage(); - } - - @Override - public void flushStream() throws IOException { - recordWriter.flush(); - } - - @Override - public void close() throws IOException { - try { - processCloseInitiated = true; - // closing its input causes the process to exit - processInStream.close(); - // wait for the process to exit by waiting for end-of-file on the named pipe connected - // to the state processor - it may take a long time for all the model state to be - // indexed - if (stateProcessorFuture != null) { - stateProcessorFuture.get(MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT.getMinutes(), TimeUnit.MINUTES); - } - // the log processor should have stopped by now too - assume processing the logs will - // take no more than 5 seconds longer than processing the state (usually it should - // finish first) - if (logTailFuture != null) { - logTailFuture.get(5, TimeUnit.SECONDS); - } - - if (cppLogHandler.seenFatalError()) { - throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); - } - LOGGER.debug("[{}] Autodetect process exited", jobId); - } catch (ExecutionException | TimeoutException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running autodetect 
process", jobId), e); - } catch (InterruptedException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running autodetect process", jobId), e); - Thread.currentThread().interrupt(); - } finally { - deleteAssociatedFiles(); - } + newMessageWriter().writeForecastMessage(params); } @Override - public void kill() throws IOException { - processKilled = true; - try { - // The PID comes via the processes log stream. We don't wait for it to arrive here, - // but if the wait times out it implies the process has only just started, in which - // case it should die very quickly when we close its input stream. - NativeControllerHolder.getNativeController().killProcess(cppLogHandler.getPid(Duration.ZERO)); - - // Wait for the process to die before closing processInStream as if the process - // is still alive when processInStream is closed autodetect will start persisting state - cppLogHandler.waitForLogStreamClose(WAIT_FOR_KILL_TIMEOUT); - } catch (TimeoutException e) { - LOGGER.warn("[{}] Failed to get PID of autodetect process to kill", jobId); - } finally { - try { - processInStream.close(); - } catch (IOException e) { - // Ignore it - we're shutting down and the method itself has logged a warning - } - try { - deleteAssociatedFiles(); - } catch (IOException e) { - // Ignore it - we're shutting down and the method itself has logged a warning - } - } - } - - private synchronized void deleteAssociatedFiles() throws IOException { - if (filesToDelete == null) { - return; - } - - for (Path fileToDelete : filesToDelete) { - if (Files.deleteIfExists(fileToDelete)) { - LOGGER.debug("[{}] Deleted file {}", jobId, fileToDelete.toString()); - } else { - LOGGER.warn("[{}] Failed to delete file {}", jobId, fileToDelete.toString()); - } - } - - filesToDelete.clear(); + public void persistState() throws IOException { + newMessageWriter().writeStartBackgroundPersistMessage(); } @Override public Iterator readAutodetectResults() { - return 
resultsParser.parseResults(processOutStream); + return resultsParser.parseResults(processOutStream()); } - @Override - public ZonedDateTime getProcessStartTime() { - return startTime; - } - - @Override - public boolean isProcessAlive() { - // Sanity check: make sure the process hasn't terminated already - return !cppLogHandler.hasLogStreamEnded(); - } - - @Override - public boolean isProcessAliveAfterWaiting() { - cppLogHandler.waitForLogStreamClose(Duration.ofMillis(45)); - return isProcessAlive(); - } - - @Override - public String readError() { - return cppLogHandler.getErrors(); + private ControlMsgToProcessWriter newMessageWriter() { + return new ControlMsgToProcessWriter(recordWriter(), numberOfFields()); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java index 06055476f7642..ea31c5de4dffa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessFactory.java @@ -16,10 +16,10 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; +import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; import 
org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; @@ -67,7 +67,7 @@ public AutodetectProcess createAutodetectProcess(Job job, // The extra 1 is the control field int numberOfFields = job.allInputFields().size() + (includeTokensField ? 1 : 0) + 1; - StateProcessor stateProcessor = new StateProcessor(settings, client); + AutodetectStateProcessor stateProcessor = new AutodetectStateProcessor(client, job.getId()); AutodetectResultsParser resultsParser = new AutodetectResultsParser(settings); NativeAutodetectProcess autodetect = new NativeAutodetectProcess( job.getId(), processPipes.getLogStream().get(), processPipes.getProcessInStream().get(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java similarity index 82% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java index ec62901d65a6e..63a496f0503bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessor.java @@ -5,17 +5,18 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.output; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import 
org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; +import org.elasticsearch.xpack.ml.process.StateProcessor; import java.io.IOException; import java.io.InputStream; @@ -28,17 +29,22 @@ /** * Reads the autodetect state and persists via a bulk request */ -public class StateProcessor extends AbstractComponent { +public class AutodetectStateProcessor implements StateProcessor { + + private static final Logger LOGGER = LogManager.getLogger(AutodetectStateProcessor.class); private static final int READ_BUF_SIZE = 8192; + private final Client client; + private final String jobId; - public StateProcessor(Settings settings, Client client) { - super(settings); + public AutodetectStateProcessor(Client client, String jobId) { this.client = client; + this.jobId = jobId; } - public void process(String jobId, InputStream in) throws IOException { + @Override + public void process(InputStream in) throws IOException { BytesReference bytesToDate = null; List newBlocks = new ArrayList<>(); byte[] readBuf = new byte[READ_BUF_SIZE]; @@ -56,7 +62,7 @@ public void process(String jobId, InputStream in) throws IOException { } else { BytesReference newBytes = new CompositeBytesReference(newBlocks.toArray(new BytesReference[0])); bytesToDate = (bytesToDate == null) ? newBytes : new CompositeBytesReference(bytesToDate, newBytes); - bytesToDate = splitAndPersist(jobId, bytesToDate, searchFrom); + bytesToDate = splitAndPersist(bytesToDate, searchFrom); searchFrom = (bytesToDate == null) ? 
0 : bytesToDate.length(); newBlocks.clear(); } @@ -69,7 +75,7 @@ public void process(String jobId, InputStream in) throws IOException { * data is expected to be a series of Elasticsearch bulk requests in UTF-8 JSON * (as would be uploaded to the public REST API) separated by zero bytes ('\0'). */ - private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, int searchFrom) throws IOException { + private BytesReference splitAndPersist(BytesReference bytesRef, int searchFrom) throws IOException { int splitFrom = 0; while (true) { int nextZeroByte = findNextZeroByte(bytesRef, searchFrom, splitFrom); @@ -80,7 +86,7 @@ private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, in // Ignore completely empty chunks if (nextZeroByte > splitFrom) { // No validation - assume the native process has formatted the state correctly - persist(jobId, bytesRef.slice(splitFrom, nextZeroByte - splitFrom)); + persist(bytesRef.slice(splitFrom, nextZeroByte - splitFrom)); } splitFrom = nextZeroByte + 1; } @@ -90,11 +96,11 @@ private BytesReference splitAndPersist(String jobId, BytesReference bytesRef, in return bytesRef.slice(splitFrom, bytesRef.length() - splitFrom); } - void persist(String jobId, BytesReference bytes) throws IOException { + void persist(BytesReference bytes) throws IOException { BulkRequest bulkRequest = new BulkRequest(); bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON); if (bulkRequest.numberOfActions() > 0) { - logger.trace("[{}] Persisting job state document", jobId); + LOGGER.trace("[{}] Persisting job state document", jobId); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) { client.bulk(bulkRequest).actionGet(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java index 7961fec449774..dc9d77cd68784 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriter.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.supercsv.encoder.CsvEncoder; import org.supercsv.encoder.DefaultCsvEncoder; import org.supercsv.prefs.CsvPreference; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java index 2c026ec15506e..fc98990d8d61f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriter.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.ForecastParams; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import java.io.IOException; import java.io.OutputStream; @@ -168,7 +169,7 @@ public void writeForecastMessage(ForecastParams params) throws IOException { builder.field("tmp_storage", params.getTmpStorage()); } builder.endObject(); - + writeMessage(FORECAST_MESSAGE_CODE + 
Strings.toString(builder)); fillCommandBuffer(); lengthEncodedWriter.flush(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java index 2228835bea2a6..57bbb69c5d0de 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/CsvRecordWriter.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.ml.job.process.autodetect.writer; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import org.supercsv.io.CsvListWriter; import org.supercsv.prefs.CsvPreference; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java index 8aa266e15d22e..5d320a1bd715c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/MultiplyingNormalizerProcess.java @@ -5,9 +5,8 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -17,6 +16,7 @@ import java.io.IOException; import java.io.PipedInputStream; import java.io.PipedOutputStream; +import 
java.time.ZonedDateTime; /** * Normalizer process that doesn't use native code. @@ -27,16 +27,15 @@ * - It can be used to produce results in testing that do not vary based on changes to the real normalization algorithms */ public class MultiplyingNormalizerProcess implements NormalizerProcess { - private static final Logger LOGGER = Loggers.getLogger(MultiplyingNormalizerProcess.class); - private final Settings settings; + private static final Logger LOGGER = LogManager.getLogger(MultiplyingNormalizerProcess.class); + private final double factor; private final PipedInputStream processOutStream; private XContentBuilder builder; private boolean shouldIgnoreHeader; - public MultiplyingNormalizerProcess(Settings settings, double factor) { - this.settings = settings; + public MultiplyingNormalizerProcess(double factor) { this.factor = factor; processOutStream = new PipedInputStream(); try { @@ -49,6 +48,11 @@ public MultiplyingNormalizerProcess(Settings settings, double factor) { shouldIgnoreHeader = true; } + @Override + public boolean isReady() { + return true; + } + @Override public void writeRecord(String[] record) throws IOException { if (shouldIgnoreHeader) { @@ -77,13 +81,33 @@ public void writeRecord(String[] record) throws IOException { } @Override - public void close() throws IOException { + public void persistState() { + // Nothing to do + } + + @Override + public void flushStream() { + // Nothing to do + } + + @Override + public void kill() { + // Nothing to do + } + + @Override + public ZonedDateTime getProcessStartTime() { + return null; + } + + @Override + public void close() { builder.close(); } @Override public NormalizerResultHandler createNormalizedResultsHandler() { - return new NormalizerResultHandler(settings, processOutStream); + return new NormalizerResultHandler(processOutStream); } @Override @@ -92,6 +116,11 @@ public boolean isProcessAlive() { return true; } + @Override + public boolean isProcessAliveAfterWaiting() { + return true; + } + 
@Override public String readError() { return ""; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java index ee6c7818b38ec..6b67ffa6acb6f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcess.java @@ -5,104 +5,41 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.xpack.ml.job.process.autodetect.writer.LengthEncodedWriter; -import org.elasticsearch.xpack.ml.job.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler; -import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.AbstractNativeProcess; -import java.io.BufferedOutputStream; -import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; +import java.util.Collections; /** * Normalizer process using native code. 
*/ -class NativeNormalizerProcess implements NormalizerProcess { - private static final Logger LOGGER = Loggers.getLogger(NativeNormalizerProcess.class); +class NativeNormalizerProcess extends AbstractNativeProcess implements NormalizerProcess { - private final String jobId; - private final Settings settings; - private final CppLogMessageHandler cppLogHandler; - private final OutputStream processInStream; - private final InputStream processOutStream; - private final LengthEncodedWriter recordWriter; - private volatile boolean processCloseInitiated; - private Future logTailThread; + private static final String NAME = "normalizer"; - NativeNormalizerProcess(String jobId, Settings settings, InputStream logStream, OutputStream processInStream, - InputStream processOutStream, ExecutorService executorService) throws EsRejectedExecutionException { - this.jobId = jobId; - this.settings = settings; - cppLogHandler = new CppLogMessageHandler(jobId, logStream); - this.processInStream = new BufferedOutputStream(processInStream); - this.processOutStream = processOutStream; - this.recordWriter = new LengthEncodedWriter(this.processInStream); - logTailThread = executorService.submit(() -> { - try (CppLogMessageHandler h = cppLogHandler) { - h.tailStream(); - } catch (IOException e) { - LOGGER.error(new ParameterizedMessage("[{}] Error tailing normalizer process logs", - new Object[] { jobId }), e); - } finally { - if (processCloseInitiated == false) { - // The log message doesn't say "crashed", as the process could have been killed - // by a user or other process (e.g. 
the Linux OOM killer) - LOGGER.error("[{}] normalizer process stopped unexpectedly", jobId); - } - } - }); + NativeNormalizerProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream) { + super(jobId, logStream, processInStream, processOutStream, null, 0, Collections.emptyList(), () -> {}); } @Override - public void writeRecord(String[] record) throws IOException { - recordWriter.writeRecord(record); + public String getName() { + return NAME; } @Override - public void close() throws IOException { - try { - processCloseInitiated = true; - // closing its input causes the process to exit - processInStream.close(); - // wait for the process to exit by waiting for end-of-file on the named pipe connected to its logger - // this may take a long time as it persists the model state - logTailThread.get(5, TimeUnit.MINUTES); - if (cppLogHandler.seenFatalError()) { - throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); - } - LOGGER.debug("[{}] Normalizer process exited", jobId); - } catch (ExecutionException | TimeoutException e) { - LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running normalizer process", new Object[] { jobId }), e); - } catch (InterruptedException e) { - LOGGER.warn("[{}] Exception closing the running normalizer process", jobId); - Thread.currentThread().interrupt(); - } + public boolean isReady() { + return true; } @Override - public NormalizerResultHandler createNormalizedResultsHandler() { - return new NormalizerResultHandler(settings, processOutStream); + public void persistState() { + // nothing to persist } @Override - public boolean isProcessAlive() { - // Sanity check: make sure the process hasn't terminated already - return !cppLogHandler.hasLogStreamEnded(); - } - - @Override - public String readError() { - return cppLogHandler.getErrors(); + public NormalizerResultHandler createNormalizedResultsHandler() { + return new NormalizerResultHandler(processOutStream()); } } diff 
--git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java index 60f52d3f44288..21f7229aef123 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NativeNormalizerProcessFactory.java @@ -5,13 +5,14 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; import java.io.IOException; @@ -22,17 +23,15 @@ public class NativeNormalizerProcessFactory implements NormalizerProcessFactory { - private static final Logger LOGGER = Loggers.getLogger(NativeNormalizerProcessFactory.class); + private static final Logger LOGGER = LogManager.getLogger(NativeNormalizerProcessFactory.class); private static final NamedPipeHelper NAMED_PIPE_HELPER = new NamedPipeHelper(); private static final Duration PROCESS_STARTUP_TIMEOUT = Duration.ofSeconds(10); private final Environment env; - private final Settings settings; private final NativeController nativeController; - public NativeNormalizerProcessFactory(Environment env, 
Settings settings, NativeController nativeController) { + public NativeNormalizerProcessFactory(Environment env, NativeController nativeController) { this.env = Objects.requireNonNull(env); - this.settings = Objects.requireNonNull(settings); this.nativeController = Objects.requireNonNull(nativeController); } @@ -43,8 +42,20 @@ public NormalizerProcess createNormalizerProcess(String jobId, String quantilesS true, false, true, true, false, false); createNativeProcess(jobId, quantilesState, processPipes, bucketSpan); - return new NativeNormalizerProcess(jobId, settings, processPipes.getLogStream().get(), - processPipes.getProcessInStream().get(), processPipes.getProcessOutStream().get(), executorService); + NativeNormalizerProcess normalizerProcess = new NativeNormalizerProcess(jobId, processPipes.getLogStream().get(), + processPipes.getProcessInStream().get(), processPipes.getProcessOutStream().get()); + + try { + normalizerProcess.start(executorService); + return normalizerProcess; + } catch (EsRejectedExecutionException e) { + try { + IOUtils.close(normalizerProcess); + } catch (IOException ioe) { + LOGGER.error("Can't close normalizer", ioe); + } + throw e; + } } private void createNativeProcess(String jobId, String quantilesState, ProcessPipes processPipes, Integer bucketSpan) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java index d0ce62612bb69..230048c5b4d2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerProcess.java @@ -6,40 +6,16 @@ package org.elasticsearch.xpack.ml.job.process.normalizer; import org.elasticsearch.xpack.ml.job.process.normalizer.output.NormalizerResultHandler; - -import java.io.Closeable; -import 
java.io.IOException; +import org.elasticsearch.xpack.ml.process.NativeProcess; /** * Interface representing the native C++ normalizer process */ -public interface NormalizerProcess extends Closeable { - - /** - * Write the record to normalizer. The record parameter should not be encoded - * (i.e. length encoded) the implementation will appy the corrrect encoding. - * - * @param record Plain array of strings, implementors of this class should - * encode the record appropriately - * @throws IOException If the write failed - */ - void writeRecord(String[] record) throws IOException; +public interface NormalizerProcess extends NativeProcess { /** * Create a result handler for this process's results. * @return results handler */ NormalizerResultHandler createNormalizedResultsHandler(); - - /** - * Returns true if the process still running. - * @return True if the process is still running - */ - boolean isProcessAlive(); - - /** - * Read any content in the error output buffer. - * @return An error message or empty String if no error. 
- */ - String readError(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java index dcadef7a24b53..3b65a739e82a9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandler.java @@ -8,8 +8,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.CompositeBytesReference; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; @@ -26,15 +24,14 @@ /** * Reads normalizer output. */ -public class NormalizerResultHandler extends AbstractComponent { +public class NormalizerResultHandler { private static final int READ_BUF_SIZE = 1024; private final InputStream inputStream; private final List normalizedResults; - public NormalizerResultHandler(Settings settings, InputStream inputStream) { - super(settings); + public NormalizerResultHandler(InputStream inputStream) { this.inputStream = inputStream; normalizedResults = new ArrayList<>(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java new file mode 100644 index 0000000000000..b84bfdd38e19a --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.ml.process.logging.CppLogMessageHandler; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; + +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.time.Duration; +import java.time.ZonedDateTime; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +/** + * Abstract class for implementing a native process. 
+ */ +public abstract class AbstractNativeProcess implements NativeProcess { + + private static final Logger LOGGER = LogManager.getLogger(AbstractNativeProcess.class); + + private static final Duration WAIT_FOR_KILL_TIMEOUT = Duration.ofMillis(1000); + + private final String jobId; + private final CppLogMessageHandler cppLogHandler; + private final OutputStream processInStream; + private final InputStream processOutStream; + private final OutputStream processRestoreStream; + private final LengthEncodedWriter recordWriter; + private final ZonedDateTime startTime; + private final int numberOfFields; + private final List filesToDelete; + private final Runnable onProcessCrash; + private volatile Future logTailFuture; + private volatile Future stateProcessorFuture; + private volatile boolean processCloseInitiated; + private volatile boolean processKilled; + private volatile boolean isReady; + + protected AbstractNativeProcess(String jobId, InputStream logStream, OutputStream processInStream, InputStream processOutStream, + OutputStream processRestoreStream, int numberOfFields, List filesToDelete, + Runnable onProcessCrash) { + this.jobId = jobId; + cppLogHandler = new CppLogMessageHandler(jobId, logStream); + this.processInStream = new BufferedOutputStream(processInStream); + this.processOutStream = processOutStream; + this.processRestoreStream = processRestoreStream; + this.recordWriter = new LengthEncodedWriter(this.processInStream); + startTime = ZonedDateTime.now(); + this.numberOfFields = numberOfFields; + this.filesToDelete = filesToDelete; + this.onProcessCrash = Objects.requireNonNull(onProcessCrash); + } + + public abstract String getName(); + + /** + * Starts a process that does not persist any state + * @param executorService the executor service to run on + */ + public void start(ExecutorService executorService) { + logTailFuture = executorService.submit(() -> { + try (CppLogMessageHandler h = cppLogHandler) { + h.tailStream(); + } catch (IOException e) { + 
if (processKilled == false) { + LOGGER.error(new ParameterizedMessage("[{}] Error tailing {} process logs", jobId, getName()), e); + } + } finally { + if (processCloseInitiated == false && processKilled == false) { + // The log message doesn't say "crashed", as the process could have been killed + // by a user or other process (e.g. the Linux OOM killer) + + String errors = cppLogHandler.getErrors(); + LOGGER.error("[{}] {} process stopped unexpectedly: {}", jobId, getName(), errors); + onProcessCrash.run(); + } + } + }); + } + + /** + * Starts a process that may persist its state + * @param executorService the executor service to run on + * @param stateProcessor the state processor + * @param persistStream the stream where the state is persisted + */ + public void start(ExecutorService executorService, StateProcessor stateProcessor, InputStream persistStream) { + start(executorService); + + stateProcessorFuture = executorService.submit(() -> { + try (InputStream in = persistStream) { + stateProcessor.process(in); + if (processKilled == false) { + LOGGER.info("[{}] State output finished", jobId); + } + } catch (IOException e) { + if (processKilled == false) { + LOGGER.error(new ParameterizedMessage("[{}] Error reading {} state output", jobId, getName()), e); + } + } + }); + } + + @Override + public boolean isReady() { + return isReady; + } + + protected void setReady() { + isReady = true; + } + + @Override + public void writeRecord(String[] record) throws IOException { + recordWriter.writeRecord(record); + } + + @Override + public void flushStream() throws IOException { + recordWriter.flush(); + } + + @Override + public void close() throws IOException { + try { + processCloseInitiated = true; + // closing its input causes the process to exit + processInStream.close(); + // wait for the process to exit by waiting for end-of-file on the named pipe connected + // to the state processor - it may take a long time for all the model state to be + // indexed + if 
(stateProcessorFuture != null) { + stateProcessorFuture.get(MachineLearningField.STATE_PERSIST_RESTORE_TIMEOUT.getMinutes(), TimeUnit.MINUTES); + } + // the log processor should have stopped by now too - assume processing the logs will + // take no more than 5 seconds longer than processing the state (usually it should + // finish first) + if (logTailFuture != null) { + logTailFuture.get(5, TimeUnit.SECONDS); + } + + if (cppLogHandler.seenFatalError()) { + throw ExceptionsHelper.serverError(cppLogHandler.getErrors()); + } + LOGGER.debug("[{}] {} process exited", jobId, getName()); + } catch (ExecutionException | TimeoutException e) { + LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running {} process", jobId, getName()), e); + } catch (InterruptedException e) { + LOGGER.warn(new ParameterizedMessage("[{}] Exception closing the running {} process", jobId, getName()), e); + Thread.currentThread().interrupt(); + } finally { + deleteAssociatedFiles(); + } + } + + @Override + public void kill() throws IOException { + processKilled = true; + try { + // The PID comes via the processes log stream. We don't wait for it to arrive here, + // but if the wait times out it implies the process has only just started, in which + // case it should die very quickly when we close its input stream. 
+ NativeControllerHolder.getNativeController().killProcess(cppLogHandler.getPid(Duration.ZERO)); + + // Wait for the process to die before closing processInStream as if the process + // is still alive when processInStream is closed it may start persisting state + cppLogHandler.waitForLogStreamClose(WAIT_FOR_KILL_TIMEOUT); + } catch (TimeoutException e) { + LOGGER.warn("[{}] Failed to get PID of {} process to kill", jobId, getName()); + } finally { + try { + processInStream.close(); + } catch (IOException e) { + // Ignore it - we're shutting down and the method itself has logged a warning + } + try { + deleteAssociatedFiles(); + } catch (IOException e) { + // Ignore it - we're shutting down and the method itself has logged a warning + } + } + } + + private synchronized void deleteAssociatedFiles() throws IOException { + if (filesToDelete == null) { + return; + } + + for (Path fileToDelete : filesToDelete) { + if (Files.deleteIfExists(fileToDelete)) { + LOGGER.debug("[{}] Deleted file {}", jobId, fileToDelete.toString()); + } else { + LOGGER.warn("[{}] Failed to delete file {}", jobId, fileToDelete.toString()); + } + } + + filesToDelete.clear(); + } + + @Override + public ZonedDateTime getProcessStartTime() { + return startTime; + } + + @Override + public boolean isProcessAlive() { + // Sanity check: make sure the process hasn't terminated already + return !cppLogHandler.hasLogStreamEnded(); + } + + @Override + public boolean isProcessAliveAfterWaiting() { + cppLogHandler.waitForLogStreamClose(Duration.ofMillis(45)); + return isProcessAlive(); + } + + @Override + public String readError() { + return cppLogHandler.getErrors(); + } + + protected String jobId() { + return jobId; + } + + protected InputStream processOutStream() { + return processOutStream; + } + + @Nullable + protected OutputStream processRestoreStream() { + return processRestoreStream; + } + + protected int numberOfFields() { + return numberOfFields; + } + + protected LengthEncodedWriter recordWriter() 
{ + return recordWriter; + } + + protected boolean isProcessKilled() { + return processKilled; + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java index 0b9cb833c8980..747074028953c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeController.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeController.java @@ -3,13 +3,13 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.env.Environment; -import org.elasticsearch.xpack.ml.job.process.logging.CppLogMessageHandler; +import org.elasticsearch.xpack.ml.process.logging.CppLogMessageHandler; import org.elasticsearch.xpack.ml.utils.NamedPipeHelper; import java.io.BufferedOutputStream; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeControllerHolder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java similarity index 97% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeControllerHolder.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java index 9bcb6e787290d..67e24b44a8494 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeControllerHolder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeControllerHolder.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.ml.MachineLearningField; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java new file mode 100644 index 0000000000000..c4f2b4a463185 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeProcess.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import java.io.Closeable; +import java.io.IOException; +import java.time.ZonedDateTime; + +/** + * Interface representing a native C++ process + */ +public interface NativeProcess extends Closeable { + + /** + * Is the process ready to receive data? + * @return {@code true} if the process is ready to receive data + */ + boolean isReady(); + + /** + * Write the record to the process. The record parameter should not be encoded + * (i.e. length encoded) the implementation will apply the correct encoding. 
+ * + * @param record Plain array of strings, implementors of this class should + * encode the record appropriately + * @throws IOException If the write failed + */ + void writeRecord(String[] record) throws IOException; + + /** + * Ask the process to persist its state in the background + * @throws IOException If writing the request fails + */ + void persistState() throws IOException; + + /** + * Flush the output data stream + */ + void flushStream() throws IOException; + + /** + * Kill the process. Do not wait for it to stop gracefully. + */ + void kill() throws IOException; + + /** + * The time the process was started + * @return Process start time + */ + ZonedDateTime getProcessStartTime(); + + /** + * Returns true if the process still running. + * Methods instructing the process are essentially + * asynchronous; the command will be continue to execute in the process after + * the call has returned. + * This method tests whether something catastrophic + * occurred in the process during its execution. + * @return True if the process is still running + */ + boolean isProcessAlive(); + + /** + * Check whether the process terminated given a grace period. + * + * Processing errors are highly likely caused by the process being unexpectedly + * terminated. + * + * Workaround: As we can not easily check if the process is alive, we rely on + * the logPipe being ended. As the loghandler runs in another thread which + * might fall behind this one, we give it a grace period. + * + * @return false if process has ended for sure, true if it probably still runs + */ + boolean isProcessAliveAfterWaiting(); + + /** + * Read any content in the error output buffer. + * @return An error message or empty String if no error. 
+ */ + String readError(); +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java index 8a0268a8d0793..9670fadfefff3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/NativeStorageProvider.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.Loggers; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java index 41a7df348b103..4d468f80176f9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/ProcessPipes.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/ProcessPipes.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.Strings; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java new file mode 100644 index 0000000000000..e3937d7199131 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/StateProcessor.java @@ -0,0 +1,14 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.process; + +import java.io.IOException; +import java.io.InputStream; + +public interface StateProcessor { + + void process(InputStream in) throws IOException; +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java index 6064cfef31b18..c3310b6b1b5b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessage.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessage.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java index af0f199dd0c58..341b9ae371b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandler.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java similarity index 95% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java rename to x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java index 34f9d8dc469fc..e82c963b5ed6c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriter.java @@ -3,9 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.autodetect.writer; +package org.elasticsearch.xpack.ml.process.writer; -import org.elasticsearch.xpack.core.ml.job.process.autodetect.writer.RecordWriter; +import org.elasticsearch.xpack.core.ml.process.writer.RecordWriter; import java.io.IOException; import java.io.OutputStream; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java index 325ad52864bfa..9ef56d927f553 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java @@ -15,8 +15,8 @@ import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.elasticsearch.xpack.ml.job.process.NativeController; -import org.elasticsearch.xpack.ml.job.process.ProcessPipes; +import org.elasticsearch.xpack.ml.process.NativeController; +import org.elasticsearch.xpack.ml.process.ProcessPipes; import org.junit.Before; import java.nio.file.Path; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java index 93e79c8b97078..6d5adeb3fdbf1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/NativeAutodetectProcessTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; import 
org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectResultsParser; -import org.elasticsearch.xpack.ml.job.process.autodetect.output.StateProcessor; +import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutodetectStateProcessor; import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; @@ -56,7 +56,7 @@ public void testProcessStartTime() throws Exception { mock(OutputStream.class), mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, null, new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); ZonedDateTime startTime = process.getProcessStartTime(); Thread.sleep(500); @@ -76,7 +76,7 @@ public void testWriteRecord() throws IOException { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); process.writeRecord(record); process.flushStream(); @@ -108,7 +108,7 @@ public void testFlush() throws IOException { try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, 
mock(AutodetectStateProcessor.class), mock(InputStream.class)); FlushJobParams params = FlushJobParams.builder().build(); process.flushJob(params); @@ -128,7 +128,7 @@ public void testWriteUpdateConfigMessage() throws IOException { } public void testPersistJob() throws IOException { - testWriteMessage(p -> p.persistJob(), ControlMsgToProcessWriter.BACKGROUND_PERSIST_MESSAGE_CODE); + testWriteMessage(p -> p.persistState(), ControlMsgToProcessWriter.BACKGROUND_PERSIST_MESSAGE_CODE); } public void testWriteMessage(CheckedConsumer writeFunction, String expectedMessageCode) throws IOException { @@ -138,7 +138,7 @@ public void testWriteMessage(CheckedConsumer writeFunct try (NativeAutodetectProcess process = new NativeAutodetectProcess("foo", logStream, bos, mock(InputStream.class), mock(OutputStream.class), NUMBER_FIELDS, Collections.emptyList(), new AutodetectResultsParser(Settings.EMPTY), mock(Runnable.class))) { - process.start(executorService, mock(StateProcessor.class), mock(InputStream.class)); + process.start(executorService, mock(AutodetectStateProcessor.class), mock(InputStream.class)); writeFunction.accept(process); process.writeUpdateModelPlotMessage(new ModelPlotConfig()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java similarity index 88% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java index 31b96d8393d12..e4fb5a7f07456 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/StateProcessorTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/output/AutodetectStateProcessorTests.java @@ -26,7 +26,6 @@ import java.util.List; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -37,7 +36,7 @@ /** * Tests for reading state from the native process. */ -public class StateProcessorTests extends ESTestCase { +public class AutodetectStateProcessorTests extends ESTestCase { private static final String STATE_SAMPLE = "" + "{\"index\": {\"_index\": \"test\", \"_type\": \"type1\", \"_id\": \"1\"}}\n" @@ -50,18 +49,20 @@ public class StateProcessorTests extends ESTestCase { + "{ \"field\" : \"value3\" }\n" + "\0"; + private static final String JOB_ID = "state-processor-test-job"; + private static final int NUM_LARGE_DOCS = 2; private static final int LARGE_DOC_SIZE = 1000000; private Client client; - private StateProcessor stateProcessor; + private AutodetectStateProcessor stateProcessor; @Before public void initialize() throws IOException { client = mock(Client.class); @SuppressWarnings("unchecked") ActionFuture bulkResponseFuture = mock(ActionFuture.class); - stateProcessor = spy(new StateProcessor(Settings.EMPTY, client)); + stateProcessor = spy(new AutodetectStateProcessor(client, JOB_ID)); when(client.bulk(any(BulkRequest.class))).thenReturn(bulkResponseFuture); ThreadPool threadPool = mock(ThreadPool.class); when(client.threadPool()).thenReturn(threadPool); @@ -75,9 +76,9 @@ public void verifyNoMoreClientInteractions() { public void testStateRead() throws IOException { ByteArrayInputStream stream = new ByteArrayInputStream(STATE_SAMPLE.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); ArgumentCaptor bytesRefCaptor = ArgumentCaptor.forClass(BytesReference.class); - verify(stateProcessor, times(3)).persist(eq("_id"), 
bytesRefCaptor.capture()); + verify(stateProcessor, times(3)).persist(bytesRefCaptor.capture()); String[] threeStates = STATE_SAMPLE.split("\0"); List capturedBytes = bytesRefCaptor.getAllValues(); @@ -92,9 +93,9 @@ public void testStateReadGivenConsecutiveZeroBytes() throws IOException { String zeroBytes = "\0\0\0\0\0\0"; ByteArrayInputStream stream = new ByteArrayInputStream(zeroBytes.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); - verify(stateProcessor, never()).persist(eq("_id"), any()); + verify(stateProcessor, never()).persist(any()); Mockito.verifyNoMoreInteractions(client); } @@ -102,9 +103,9 @@ public void testStateReadGivenConsecutiveSpacesFollowedByZeroByte() throws IOExc String zeroBytes = " \n\0"; ByteArrayInputStream stream = new ByteArrayInputStream(zeroBytes.getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); + stateProcessor.process(stream); - verify(stateProcessor, times(1)).persist(eq("_id"), any()); + verify(stateProcessor, times(1)).persist(any()); Mockito.verifyNoMoreInteractions(client); } @@ -125,8 +126,8 @@ public void testLargeStateRead() throws Exception { } ByteArrayInputStream stream = new ByteArrayInputStream(builder.toString().getBytes(StandardCharsets.UTF_8)); - stateProcessor.process("_id", stream); - verify(stateProcessor, times(NUM_LARGE_DOCS)).persist(eq("_id"), any()); + stateProcessor.process(stream); + verify(stateProcessor, times(NUM_LARGE_DOCS)).persist(any()); verify(client, times(NUM_LARGE_DOCS)).bulk(any(BulkRequest.class)); verify(client, times(NUM_LARGE_DOCS)).threadPool(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java index 38bef42f800cf..01bdd6a999f26 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/AbstractDataToProcessWriterTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.ml.job.process.DataCountsReporter; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcess; import org.elasticsearch.xpack.ml.job.process.autodetect.writer.AbstractDataToProcessWriter.InputOutputMap; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.junit.Before; import org.mockito.Mockito; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java index 3d08f5a1c25fb..57554227e9ad3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/ControlMsgToProcessWriterTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams; import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; +import org.elasticsearch.xpack.ml.process.writer.LengthEncodedWriter; import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.InOrder; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java index 661eeca98db8f..04ea8f2c70ef3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java 
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/NormalizerTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.job.results.BucketInfluencer; @@ -32,7 +31,7 @@ public class NormalizerTests extends ESTestCase { private static final double INITIAL_SCORE = 3.0; private static final double FACTOR = 2.0; - private Bucket generateBucket(Date timestamp) throws IOException { + private Bucket generateBucket(Date timestamp) { return new Bucket(JOB_ID, timestamp, BUCKET_SPAN); } @@ -49,8 +48,8 @@ public void testNormalize() throws IOException, InterruptedException { ExecutorService threadpool = Executors.newScheduledThreadPool(1); try { NormalizerProcessFactory processFactory = mock(NormalizerProcessFactory.class); - when(processFactory.createNormalizerProcess(eq(JOB_ID), eq(QUANTILES_STATE), eq(BUCKET_SPAN), - any())).thenReturn(new MultiplyingNormalizerProcess(Settings.EMPTY, FACTOR)); + when(processFactory.createNormalizerProcess(eq(JOB_ID), eq(QUANTILES_STATE), eq(BUCKET_SPAN), any())) + .thenReturn(new MultiplyingNormalizerProcess(FACTOR)); Normalizer normalizer = new Normalizer(JOB_ID, processFactory, threadpool); Bucket bucket = generateBucket(new Date(0)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java index 9e6a4afc4e318..cc0234df39ed7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/normalizer/output/NormalizerResultHandlerTests.java @@ 
-5,7 +5,6 @@ */ package org.elasticsearch.xpack.ml.job.process.normalizer.output; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerResult; @@ -32,7 +31,7 @@ public void testParse() throws IOException { + "\"value_field_name\":\"x\",\"probability\":0.03,\"normalized_score\":22.22}\n"; InputStream is = new ByteArrayInputStream(testData.getBytes(StandardCharsets.UTF_8)); - NormalizerResultHandler handler = new NormalizerResultHandler(Settings.EMPTY, is); + NormalizerResultHandler handler = new NormalizerResultHandler(is); handler.process(); List results = handler.getNormalizedResults(); assertEquals(3, results.size()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java index 08c73cdd9c7e9..ac00e8a24e1cf 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeControllerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeControllerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java index 3103e76c82bde..fd87e29387e0b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/NativeStorageProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/NativeStorageProviderTests.java @@ -4,7 +4,7 @@ * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -21,12 +21,11 @@ import java.util.HashMap; import java.util.Map; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Mockito.any; - -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; public class NativeStorageProviderTests extends ESTestCase { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java rename to 
x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java index 708d7af152014..fa703e778c49d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessPipesTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/ProcessPipesTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process; +package org.elasticsearch.xpack.ml.process; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java index af2691d6f3575..d490d58c3ab52 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageHandlerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageHandlerTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.apache.logging.log4j.Level; import org.apache.logging.log4j.Logger; @@ -203,7 +203,7 @@ public void testParseFatalError() throws IOException, IllegalAccessException { } } - private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId) + private static void executeLoggingTest(InputStream is, MockLogAppender mockAppender, Level level, String jobId) throws IOException { Logger cppMessageLogger = Loggers.getLogger(CppLogMessageHandler.class); Loggers.addAppender(cppMessageLogger, mockAppender); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java similarity index 98% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java index d3145bb9f6c6b..c6a0bdf151a48 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/logging/CppLogMessageTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/logging/CppLogMessageTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ -package org.elasticsearch.xpack.ml.job.process.logging; +package org.elasticsearch.xpack.ml.process.logging; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -72,4 +72,4 @@ protected Reader instanceReader() { protected CppLogMessage doParseInstance(XContentParser parser) { return CppLogMessage.PARSER.apply(parser, null); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java similarity index 99% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java rename to x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java index 36f8c8f003050..0e9aff1fb2caf 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/writer/LengthEncodedWriterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/writer/LengthEncodedWriterTests.java @@ -3,7 +3,7 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.ml.job.process.autodetect.writer; +package org.elasticsearch.xpack.ml.process.writer; import org.elasticsearch.test.ESTestCase; import org.junit.Assert; From af28d1f64869d537aa3db9f3025d8a0a60d783e8 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Fri, 26 Oct 2018 08:47:39 -0600 Subject: [PATCH 5/9] Fix line length for org.elasticsearch.common.* files (#34888) This removes the checkstyle suppressions for things in the `common` package. 
Relates to #34884 --- .../resources/checkstyle_suppressions.xml | 34 ----- .../org/elasticsearch/common/Numbers.java | 9 +- .../common/blobstore/fs/FsBlobStore.java | 3 +- .../common/bytes/BytesArray.java | 3 +- .../common/bytes/PagedBytesReference.java | 3 +- .../org/elasticsearch/common/cache/Cache.java | 3 +- .../common/collect/ImmutableOpenIntMap.java | 4 +- .../DefaultConstructionProxyFactory.java | 3 +- .../inject/internal/ConstructionContext.java | 3 +- .../inject/multibindings/MapBinder.java | 6 +- .../common/inject/spi/InjectionPoint.java | 3 +- .../org/elasticsearch/common/io/Channels.java | 15 ++- .../org/elasticsearch/common/joda/Joda.java | 24 ++-- .../common/lucene/search/XMoreLikeThis.java | 3 +- .../elasticsearch/common/network/Cidrs.java | 6 +- .../common/network/NetworkService.java | 9 +- .../common/recycler/Recyclers.java | 3 +- .../elasticsearch/common/util/BigArrays.java | 24 ++-- .../common/util/CancellableThreads.java | 3 +- .../common/util/CollectionUtils.java | 3 +- .../common/util/concurrent/EsExecutors.java | 15 ++- .../common/util/concurrent/ThreadBarrier.java | 24 ++-- .../common/util/concurrent/ThreadContext.java | 23 ++-- .../common/xcontent/XContentHelper.java | 12 +- .../common/geo/ShapeBuilderTests.java | 3 +- .../common/hash/MessageDigestsTests.java | 24 ++-- .../common/network/CidrsTests.java | 3 +- .../common/unit/DistanceUnitTests.java | 6 +- .../common/unit/FuzzinessTests.java | 3 +- .../common/util/LongObjectHashMapTests.java | 3 +- .../util/concurrent/EsExecutorsTests.java | 6 +- .../concurrent/PrioritizedExecutorsTests.java | 3 +- .../builder/XContentBuilderTests.java | 6 +- .../FilterPathGeneratorFilteringTests.java | 117 +++++++++++------- 34 files changed, 242 insertions(+), 170 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 1297b305ea0c4..267488e97a04f 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml 
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -164,29 +164,6 @@ - - - - - - - - - - - - - - - - - - - - - - - @@ -407,17 +384,6 @@ - - - - - - - - - - - diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java index 2c4d700c92ce3..7561175f3fe35 100644 --- a/server/src/main/java/org/elasticsearch/common/Numbers.java +++ b/server/src/main/java/org/elasticsearch/common/Numbers.java @@ -61,7 +61,8 @@ public static int bytesToInt(byte[] arr) { } public static int bytesToInt(BytesRef bytes) { - return (bytes.bytes[bytes.offset] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); + return (bytes.bytes[bytes.offset] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | + ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); } /** @@ -77,8 +78,10 @@ public static long bytesToLong(byte[] arr) { } public static long bytesToLong(BytesRef bytes) { - int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); - int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); + int high = (bytes.bytes[bytes.offset + 0] << 24) | ((bytes.bytes[bytes.offset + 1] & 0xff) << 16) | + ((bytes.bytes[bytes.offset + 2] & 0xff) << 8) | (bytes.bytes[bytes.offset + 3] & 0xff); + int low = (bytes.bytes[bytes.offset + 4] << 24) | ((bytes.bytes[bytes.offset + 5] & 0xff) << 16) | + ((bytes.bytes[bytes.offset + 6] & 0xff) << 8) | (bytes.bytes[bytes.offset + 7] & 0xff); return (((long) high) << 32) | (low & 0x0ffffffffL); } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java 
b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 29f3b2f7e15fa..c49143edb446e 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -48,7 +48,8 @@ public FsBlobStore(Settings settings, Path path) throws IOException { if (!this.readOnly) { Files.createDirectories(path); } - this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); + this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size", + new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index 9b78c2fe5a788..de21acc487df5 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -68,7 +68,8 @@ public int length() { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > this.length) { - throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new BytesArray(bytes, offset + from, length); } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index b336acfba2008..f6dcdfccca01a 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -63,7 +63,8 @@ 
public int length() { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > length()) { - throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + length() + + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new PagedBytesReference(bigarrays, byteArray, offset + from, length); } diff --git a/server/src/main/java/org/elasticsearch/common/cache/Cache.java b/server/src/main/java/org/elasticsearch/common/cache/Cache.java index beb2819f2e6dc..67061a1533475 100644 --- a/server/src/main/java/org/elasticsearch/common/cache/Cache.java +++ b/server/src/main/java/org/elasticsearch/common/cache/Cache.java @@ -485,7 +485,8 @@ private void put(K key, V value, long now) { promote(tuple.v1(), now); } if (replaced) { - removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, RemovalNotification.RemovalReason.REPLACED)); + removalListener.onRemoval(new RemovalNotification<>(tuple.v2().key, tuple.v2().value, + RemovalNotification.RemovalReason.REPLACED)); } } diff --git a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java index 43e3552909b36..cb4457ce24b9b 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenIntMap.java @@ -39,8 +39,8 @@ /** * An immutable map implementation based on open hash map. *

- * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenIntMap)} (which is an optimized - * option to copy over existing content and modify it). + * Can be constructed using a {@link #builder()}, or using {@link #builder(org.elasticsearch.common.collect.ImmutableOpenIntMap)} + * (which is an optimized option to copy over existing content and modify it). */ public final class ImmutableOpenIntMap implements Iterable> { diff --git a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java index 49ada56cefa6b..36c55d0cb932a 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java +++ b/server/src/main/java/org/elasticsearch/common/inject/DefaultConstructionProxyFactory.java @@ -50,7 +50,8 @@ public T newInstance(Object... arguments) throws InvocationTargetException { } catch (InstantiationException e) { throw new AssertionError(e); // shouldn't happen, we know this is a concrete type } catch (IllegalAccessException e) { - throw new AssertionError("Wrong access modifiers on " + constructor, e); // a security manager is blocking us, we're hosed + // a security manager is blocking us, we're hosed + throw new AssertionError("Wrong access modifiers on " + constructor, e); } } diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java index 34c9faf77e770..0813f1f51b34d 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/ConstructionContext.java @@ -79,7 +79,8 @@ public Object createProxy(Errors errors, Class expectedType) throws ErrorsExc // ES: Replace, since we don't use bytecode gen, just get the 
type class loader, or system if its null //ClassLoader classLoader = BytecodeGen.getClassLoader(expectedType); - ClassLoader classLoader = expectedType.getClassLoader() == null ? ClassLoader.getSystemClassLoader() : expectedType.getClassLoader(); + ClassLoader classLoader = expectedType.getClassLoader() == null ? + ClassLoader.getSystemClassLoader() : expectedType.getClassLoader(); return expectedType.cast(Proxy.newProxyInstance(classLoader, new Class[]{expectedType}, invocationHandler)); } diff --git a/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java b/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java index a9a1bb173b797..a0a22d96f58d5 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java +++ b/server/src/main/java/org/elasticsearch/common/inject/multibindings/MapBinder.java @@ -275,7 +275,8 @@ public static class MapBinderProviderWithDependencies implements ProviderWi private final Provider>>> provider; @SuppressWarnings("rawtypes") // code is silly stupid with generics - MapBinderProviderWithDependencies(RealMapBinder binder, Set> dependencies, Provider>>> provider) { + MapBinderProviderWithDependencies(RealMapBinder binder, Set> dependencies, + Provider>>> provider) { this.binder = binder; this.dependencies = dependencies; this.provider = provider; @@ -315,7 +316,8 @@ public void configure(Binder binder) { // binds a Map> from a collection of Map> final Provider>>> entrySetProvider = binder .getProvider(entrySetBinder.getSetKey()); - binder.bind(providerMapKey).toProvider(new MapBinderProviderWithDependencies(RealMapBinder.this, dependencies, entrySetProvider)); + binder.bind(providerMapKey) + .toProvider(new MapBinderProviderWithDependencies(RealMapBinder.this, dependencies, entrySetProvider)); final Provider>> mapProvider = binder.getProvider(providerMapKey); binder.bind(mapKey).toProvider(new ProviderWithDependencies>() { diff --git 
a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 333938843c13e..07ef3162300d1 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/server/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -345,7 +345,8 @@ private static void checkForMisplacedBindingAnnotations(Member member, Errors er } private static void addInjectionPoints(TypeLiteral type, - Factory factory, boolean statics, Collection injectionPoints, + Factory factory, boolean statics, + Collection injectionPoints, Errors errors) { if (type.getType() == Object.class) { return; diff --git a/server/src/main/java/org/elasticsearch/common/io/Channels.java b/server/src/main/java/org/elasticsearch/common/io/Channels.java index cb8ac062fbcb2..1d76be43ca981 100644 --- a/server/src/main/java/org/elasticsearch/common/io/Channels.java +++ b/server/src/main/java/org/elasticsearch/common/io/Channels.java @@ -62,7 +62,8 @@ public static byte[] readFromFileChannel(FileChannel channel, long position, int * @param destOffset offset in dest to read into * @param length number of bytes to read */ - public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, byte[] dest, int destOffset, int length) throws IOException { + public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, + byte[] dest, int destOffset, int length) throws IOException { int read = readFromFileChannel(channel, channelPosition, dest, destOffset, length); if (read < 0) { throw new EOFException("read past EOF. pos [" + channelPosition + "] length: [" + length + "] end: [" + channel.size() + "]"); @@ -80,7 +81,8 @@ public static void readFromFileChannelWithEofException(FileChannel channel, long * @return total bytes read or -1 if an attempt was made to read past EOF. 
The method always tries to read all the bytes * that will fit in the destination byte buffer. */ - public static int readFromFileChannel(FileChannel channel, long channelPosition, byte[] dest, int destOffset, int length) throws IOException { + public static int readFromFileChannel(FileChannel channel, long channelPosition, byte[] dest, + int destOffset, int length) throws IOException { ByteBuffer buffer = ByteBuffer.wrap(dest, destOffset, length); return readFromFileChannel(channel, channelPosition, buffer); } @@ -97,7 +99,8 @@ public static int readFromFileChannel(FileChannel channel, long channelPosition, public static void readFromFileChannelWithEofException(FileChannel channel, long channelPosition, ByteBuffer dest) throws IOException { int read = readFromFileChannel(channel, channelPosition, dest); if (read < 0) { - throw new EOFException("read past EOF. pos [" + channelPosition + "] length: [" + dest.limit() + "] end: [" + channel.size() + "]"); + throw new EOFException("read past EOF. 
pos [" + channelPosition + + "] length: [" + dest.limit() + "] end: [" + channel.size() + "]"); } } @@ -135,7 +138,8 @@ public static int readFromFileChannel(FileChannel channel, long channelPosition, dest.position(tmpBuffer.position()); } - assert bytesRead == bytesToRead : "failed to read an entire buffer but also didn't get an EOF (read [" + bytesRead + "] needed [" + bytesToRead + "]"; + assert bytesRead == bytesToRead : "failed to read an entire buffer but also didn't get an EOF (read [" + + bytesRead + "] needed [" + bytesToRead + "]"; return bytesRead; } } @@ -149,7 +153,8 @@ private static int readSingleChunk(FileChannel channel, long channelPosition, By return read; } - assert read > 0 : "FileChannel.read with non zero-length bb.remaining() must always read at least one byte (FileChannel is in blocking mode, see spec of ReadableByteChannel)"; + assert read > 0 : "FileChannel.read with non zero-length bb.remaining() must always read at least one byte " + + "(FileChannel is in blocking mode, see spec of ReadableByteChannel)"; bytesRead += read; channelPosition += read; diff --git a/server/src/main/java/org/elasticsearch/common/joda/Joda.java b/server/src/main/java/org/elasticsearch/common/joda/Joda.java index 35ae6e2341f8d..9b3e5974fb6ca 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/Joda.java +++ b/server/src/main/java/org/elasticsearch/common/joda/Joda.java @@ -154,9 +154,11 @@ public static FormatDateTimeFormatter forPattern(String input, Locale locale) { } else if ("yearMonthDay".equals(input) || "year_month_day".equals(input)) { formatter = ISODateTimeFormat.yearMonthDay(); } else if ("epoch_second".equals(input)) { - formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(false), new EpochTimeParser(false)).toFormatter(); + formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(false), + new EpochTimeParser(false)).toFormatter(); } else if ("epoch_millis".equals(input)) { - formatter = new 
DateTimeFormatterBuilder().append(new EpochTimePrinter(true), new EpochTimeParser(true)).toFormatter(); + formatter = new DateTimeFormatterBuilder().append(new EpochTimePrinter(true), + new EpochTimeParser(true)).toFormatter(); // strict date formats here, must be at least 4 digits for year and two for months and two for day } else if ("strictBasicWeekDate".equals(input) || "strict_basic_week_date".equals(input)) { formatter = StrictISODateTimeFormat.basicWeekDate(); @@ -245,7 +247,8 @@ public static FormatDateTimeFormatter forPattern(String input, Locale locale) { parsers[i] = currentParser.getParser(); } - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder() + .append(dateTimeFormatter.withZone(DateTimeZone.UTC).getPrinter(), parsers); formatter = builder.toFormatter(); } } else { @@ -286,9 +289,11 @@ public static FormatDateTimeFormatter getStrictStandardDateFormatter() { .toFormatter() .withZoneUTC(); - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)}); + DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), + new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)}); - return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT); + return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", + builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT); } @@ -312,7 +317,8 @@ public DurationFieldType getRangeDurationType() { @Override public DateTimeField getField(Chronology chronology) { - return new 
OffsetDateTimeField(new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1); + return new OffsetDateTimeField( + new DividedDateTimeField(new OffsetDateTimeField(chronology.monthOfYear(), -1), QuarterOfYear, 3), 1); } }; @@ -393,7 +399,8 @@ public void printTo(StringBuffer buf, long instant, Chronology chrono, int displ * {@link DateTimeFormatter#printTo(Appendable, long, Chronology)} when using a time zone. */ @Override - public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, DateTimeZone displayZone, Locale locale) throws IOException { + public void printTo(Writer out, long instant, Chronology chrono, int displayOffset, + DateTimeZone displayZone, Locale locale) throws IOException { if (hasMilliSecondPrecision) { out.write(String.valueOf(instant - displayOffset)); } else { @@ -427,7 +434,8 @@ private long getDateTimeMillis(ReadablePartial partial) { int minuteOfHour = partial.get(DateTimeFieldType.minuteOfHour()); int secondOfMinute = partial.get(DateTimeFieldType.secondOfMinute()); int millisOfSecond = partial.get(DateTimeFieldType.millisOfSecond()); - return partial.getChronology().getDateTimeMillis(year, monthOfYear, dayOfMonth, hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond); + return partial.getChronology().getDateTimeMillis(year, monthOfYear, dayOfMonth, + hourOfDay, minuteOfHour, secondOfMinute, millisOfSecond); } } } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index f931ee2dc31a7..1920db12117d4 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -157,7 +157,8 @@ public final class XMoreLikeThis { // static { -// assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_4_9: "Remove this class once we 
upgrade to Lucene 5.0"; +// assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_4_9: +// "Remove this class once we upgrade to Lucene 5.0"; // } /** diff --git a/server/src/main/java/org/elasticsearch/common/network/Cidrs.java b/server/src/main/java/org/elasticsearch/common/network/Cidrs.java index 1bdd7bf562b93..bdf2257e90298 100644 --- a/server/src/main/java/org/elasticsearch/common/network/Cidrs.java +++ b/server/src/main/java/org/elasticsearch/common/network/Cidrs.java @@ -40,13 +40,15 @@ public static long[] cidrMaskToMinMax(String cidr) { String[] fields = cidr.split("/"); if (fields.length != 2) { throw new IllegalArgumentException( - String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) + String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] but was [%s] after splitting on \"/\" in [%s]", + Arrays.toString(fields), cidr) ); } // do not try to parse IPv4-mapped IPv6 address if (fields[0].contains(":")) { throw new IllegalArgumentException( - String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] where a, b, c, d are decimal octets but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) + String.format(Locale.ROOT, "invalid IPv4/CIDR; expected [a.b.c.d, e] where a, b, c, d are decimal octets " + + "but was [%s] after splitting on \"/\" in [%s]", Arrays.toString(fields), cidr) ); } byte[] addressBytes; diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java index 7dab3e5256682..de4aee289d336 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -113,7 +113,8 @@ public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOExcep } // check if its a wildcard 
address: this is only ok if its the only address! if (address.isAnyLocalAddress() && addresses.length > 1) { - throw new IllegalArgumentException("bind address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense"); + throw new IllegalArgumentException("bind address: {" + NetworkAddress.format(address) + + "} is wildcard, but multiple addresses specified: this makes no sense"); } } return addresses; @@ -156,12 +157,14 @@ public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOE for (InetAddress address : addresses) { // check if its multicast: flat out mistake if (address.isMulticastAddress()) { - throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is invalid: multicast address"); + throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + + "} is invalid: multicast address"); } // check if its a wildcard address: this is only ok if its the only address! 
// (if it was a single wildcard address, it was replaced by step 1 above) if (address.isAnyLocalAddress()) { - throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense"); + throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + + "} is wildcard, but multiple addresses specified: this makes no sense"); } } diff --git a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index f84441fbce436..b19f569481db1 100644 --- a/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/server/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -148,7 +148,8 @@ public boolean isRecycled() { } /** - * Create a concurrent implementation that can support concurrent access from concurrencyLevel threads with little contention. + * Create a concurrent implementation that can support concurrent access from + * concurrencyLevel threads with little contention. */ public static Recycler concurrent(final Recycler.Factory factory, final int concurrencyLevel) { if (concurrencyLevel < 1) { diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index 1e305d60fea03..12c511311ea5b 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -44,7 +44,8 @@ public class BigArrays implements Releasable { public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES; public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF; - /** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. 
*/ + /** Returns the next size to grow when working with parallel arrays that + * may have different page sizes or number of bytes per element. */ public static long overSize(long minTargetSize) { return overSize(minTargetSize, PAGE_SIZE_IN_BYTES / 8, 1); } @@ -345,7 +346,8 @@ private static class ObjectArrayWrapper extends AbstractArrayWrapper implemen @Override public long ramBytesUsed() { - return SHALLOW_SIZE + RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + RamUsageEstimator.NUM_BYTES_OBJECT_REF * size()); + return SHALLOW_SIZE + RamUsageEstimator.alignObjectSize(RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + + RamUsageEstimator.NUM_BYTES_OBJECT_REF * size()); } @SuppressWarnings("unchecked") @@ -503,7 +505,8 @@ public ByteArray resize(ByteArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public ByteArray grow(ByteArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -587,7 +590,8 @@ public IntArray resize(IntArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public IntArray grow(IntArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -638,7 +642,8 @@ public LongArray resize(LongArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. 
*/ public LongArray grow(LongArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -686,7 +691,8 @@ public DoubleArray resize(DoubleArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public DoubleArray grow(DoubleArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -734,7 +740,8 @@ public FloatArray resize(FloatArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public FloatArray grow(FloatArray array, long minSize) { if (minSize <= array.size()) { return array; @@ -775,7 +782,8 @@ public ObjectArray resize(ObjectArray array, long size) { } } - /** Grow an array to a size that is larger than minSize, preserving content, and potentially reusing part of the provided array. */ + /** Grow an array to a size that is larger than minSize, + * preserving content, and potentially reusing part of the provided array. */ public ObjectArray grow(ObjectArray array, long minSize) { if (minSize <= array.size()) { return array; diff --git a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java index 4399ba6a8fe5b..c2f55b8d9b939 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java +++ b/server/src/main/java/org/elasticsearch/common/util/CancellableThreads.java @@ -45,7 +45,8 @@ public synchronized boolean isCancelled() { } - /** call this will throw an exception if operation was cancelled. 
Override {@link #onCancel(String, Exception)} for custom failure logic */ + /** call this will throw an exception if operation was cancelled. + * Override {@link #onCancel(String, Exception)} for custom failure logic */ public synchronized void checkForCancel() { if (isCancelled()) { onCancel(reason, null); diff --git a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 391f23c4f94c0..ce1bfe87131ba 100644 --- a/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/server/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -307,7 +307,8 @@ public static void sort(final BytesRefArray bytes, final int[] indices) { sort(new BytesRefBuilder(), new BytesRefBuilder(), bytes, indices); } - private static void sort(final BytesRefBuilder scratch, final BytesRefBuilder scratch1, final BytesRefArray bytes, final int[] indices) { + private static void sort(final BytesRefBuilder scratch, final BytesRefBuilder scratch1, + final BytesRefArray bytes, final int[] indices) { final int numValues = bytes.size(); assert indices.length >= numValues; diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index d38eb03fae3dd..abc95810ba9a9 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -59,25 +59,30 @@ public static int numberOfProcessors(final Settings settings) { return PROCESSORS_SETTING.get(settings); } - public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, ThreadContext contextHolder, ScheduledExecutorService timer) { + public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, + ThreadContext 
contextHolder, ScheduledExecutorService timer) { return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory, contextHolder, timer); } - public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory, ThreadContext contextHolder) { + public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, + ThreadFactory threadFactory, ThreadContext contextHolder) { ExecutorScalingQueue queue = new ExecutorScalingQueue<>(); - EsThreadPoolExecutor executor = new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder); + EsThreadPoolExecutor executor = + new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy(), contextHolder); queue.executor = executor; return executor; } - public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory, ThreadContext contextHolder) { + public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, + ThreadFactory threadFactory, ThreadContext contextHolder) { BlockingQueue queue; if (queueCapacity < 0) { queue = ConcurrentCollections.newBlockingQueue(); } else { queue = new SizeBlockingQueue<>(ConcurrentCollections.newBlockingQueue(), queueCapacity); } - return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy(), contextHolder); + return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, + queue, threadFactory, new EsAbortPolicy(), contextHolder); } /** diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java index 967f0c890d270..0b2b1a5a54c9e 100644 --- 
a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadBarrier.java @@ -246,18 +246,18 @@ private synchronized void initCause(Throwable t) { *

* Usage example:
*


-     *                                                                                             BarrierTimer timer = new BarrierTimer();
-     *                                                                                             ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer );
-     *                                                                                             ..
-     *                                                                                             barrier.await(); // starts timer when all threads trip on await
-     *                                                                                             barrier.await(); // stops  timer when all threads trip on await
-     *                                                                                             ..
-     *                                                                                             long time = timer.getTimeInNanos();
-     *                                                                                             long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration
-     *                                                                                             long secs = timer.getTimeInSeconds();    //total runtime in seconds
-     *                                                                                             ..
-     *                                                                                             timer.reset();  // reuse timer
-     *                                                                                           
+ * BarrierTimer timer = new BarrierTimer(); + * ThreadBarrier barrier = new ThreadBarrier( nTHREADS + 1, timer ); + * .. + * barrier.await(); // starts timer when all threads trip on await + * barrier.await(); // stops timer when all threads trip on await + * .. + * long time = timer.getTimeInNanos(); + * long tpi = time / ((long)nREPEATS * nTHREADS); //throughput per thread iteration + * long secs = timer.getTimeInSeconds(); //total runtime in seconds + * .. + * timer.reset(); // reuse timer + * */ public static class BarrierTimer implements Runnable { volatile boolean started; diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index f2b1c209cd9dc..9664811149567 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -56,14 +56,15 @@ /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with * a thread. It allows to store and retrieve header information across method calls, network calls as well as threads spawned from a - * thread that has a {@link ThreadContext} associated with. Threads spawned from a {@link org.elasticsearch.threadpool.ThreadPool} have out of the box - * support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread that it is forking from.". - * Network calls will also preserve the senders headers automatically. + * thread that has a {@link ThreadContext} associated with. Threads spawned from a {@link org.elasticsearch.threadpool.ThreadPool} + * have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread + * that it is forking from.". Network calls will also preserve the senders headers automatically. *

- * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by a thread pool or executor - * being responsible for stashing and restoring the threads context. For instance if a network request is received, all headers are deserialized from the network - * and directly added as the headers of the threads {@link ThreadContext} (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently - * active on this thread the network code uses a try/with pattern to stash it's current context, read headers into a fresh one and once the request is handled or a handler thread + * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every elasticsearch thread is managed by + * a thread pool or executor being responsible for stashing and restoring the threads context. For instance if a network request is + * received, all headers are deserialized from the network and directly added as the headers of the threads {@link ThreadContext} + * (see {@link #readHeaders(StreamInput)}. In order to not modify the context that is currently active on this thread the network code + * uses a try/with pattern to stash it's current context, read headers into a fresh one and once the request is handled or a handler thread * is forked (which in turn inherits the context) it restores the previous context. For instance: *

*
@@ -127,8 +128,9 @@ public StoredContext stashContext() {
     }
 
     /**
-     * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. The removed context can be
-     * restored when closing the returned {@link StoredContext}. The merge strategy is that headers that are already existing are preserved unless they are defaults.
+     * Removes the current context and resets a new context that contains a merge of the current headers and the given headers.
+     * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers
+     * that are already existing are preserved unless they are defaults.
      */
     public StoredContext stashAndMergeHeaders(Map headers) {
         final ThreadContextStruct context = threadLocal.get();
@@ -481,7 +483,8 @@ private ThreadContextStruct putResponse(final String key, final String value, fi
                     logger.warn("Dropping a warning header, as their total size reached the maximum allowed of ["
                             + maxWarningHeaderSize + "] bytes set in ["
                             + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!");
-                    return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize);
+                    return new ThreadContextStruct(requestHeaders, responseHeaders,
+                        transientHeaders, isSystemContext, newWarningHeaderSize);
                 }
             }
 
diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
index 9c01c094b7a0d..d193cfd510823 100644
--- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
+++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -42,7 +42,8 @@ public class XContentHelper {
 
     /**
      * Creates a parser based on the bytes provided
-     * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference, XContentType)} to avoid content type auto-detection
+     * @deprecated use {@link #createParser(NamedXContentRegistry, DeprecationHandler, BytesReference, XContentType)}
+     * to avoid content type auto-detection
      */
     @Deprecated
     public static XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler,
@@ -109,7 +110,8 @@ public static Tuple<XContentType, Map<String, Object>> convertToMap(BytesReferen
             }
             contentType = xContentType != null ? xContentType : XContentFactory.xContentType(input);
             try (InputStream stream = input) {
-                return new Tuple<>(Objects.requireNonNull(contentType), convertToMap(XContentFactory.xContent(contentType), stream, ordered));
+                return new Tuple<>(Objects.requireNonNull(contentType),
+                    convertToMap(XContentFactory.xContent(contentType), stream, ordered));
             }
         } catch (IOException e) {
             throw new ElasticsearchParseException("Failed to parse content to map", e);
@@ -294,7 +296,8 @@ private static boolean allListValuesAreMapsOfOne(List<Object> list) {
      * auto-detection
      */
     @Deprecated
-    public static void writeRawField(String field, BytesReference source, XContentBuilder builder, ToXContent.Params params) throws IOException {
+    public static void writeRawField(String field, BytesReference source, XContentBuilder builder,
+                                     ToXContent.Params params) throws IOException {
         Compressor compressor = CompressorFactory.compressor(source);
         if (compressor != null) {
             try (InputStream compressedStreamInput = compressor.streamInput(source.streamInput())) {
@@ -340,7 +343,8 @@ public static BytesReference toXContent(ToXContent toXContent, XContentType xCon
      * {@link XContentType}. Wraps the output into a new anonymous object according to the value returned
      * by the {@link ToXContent#isFragment()} method returns.
      */
-    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params, boolean humanReadable) throws IOException {
+    public static BytesReference toXContent(ToXContent toXContent, XContentType xContentType, Params params,
+                                            boolean humanReadable) throws IOException {
         try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) {
             builder.humanReadable(humanReadable);
             if (toXContent.isFragment()) {
diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
index 78c3963bd0429..5dcc811b0c6be 100644
--- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java
@@ -94,7 +94,8 @@ public void testNewPolygon_coordinate() {
 
     public void testNewPolygon_coordinates() {
         Polygon polygon = new PolygonBuilder(new CoordinatesBuilder()
-                .coordinates(new Coordinate(-45, 30), new Coordinate(45, 30), new Coordinate(45, -30), new Coordinate(-45, -30), new Coordinate(-45, 30))
+                .coordinates(new Coordinate(-45, 30), new Coordinate(45, 30),
+                    new Coordinate(45, -30), new Coordinate(-45, -30), new Coordinate(-45, 30))
                 ).toPolygon();
 
         LineString exterior = polygon.getExteriorRing();
diff --git a/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java b/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
index e3c085f032830..ffe81fff5b634 100644
--- a/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/hash/MessageDigestsTests.java
@@ -34,8 +34,10 @@ private void assertHash(String expected, String test, MessageDigest messageDiges
     public void testMd5() throws Exception {
         assertHash("d41d8cd98f00b204e9800998ecf8427e", "", MessageDigests.md5());
         assertHash("900150983cd24fb0d6963f7d28e17f72", "abc", MessageDigests.md5());
-        assertHash("8215ef0796a20bcaaae116d3876c664a", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.md5());
-        assertHash("7707d6ae4e027c70eea2a935c2296f21", new String(new char[1000000]).replace("\0", "a"), MessageDigests.md5());
+        assertHash("8215ef0796a20bcaaae116d3876c664a",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.md5());
+        assertHash("7707d6ae4e027c70eea2a935c2296f21",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.md5());
         assertHash("9e107d9d372bb6826bd81d3542a419d6", "The quick brown fox jumps over the lazy dog", MessageDigests.md5());
         assertHash("1055d3e698d289f2af8663725127bd4b", "The quick brown fox jumps over the lazy cog", MessageDigests.md5());
     }
@@ -43,8 +45,10 @@ public void testMd5() throws Exception {
     public void testSha1() throws Exception {
         assertHash("da39a3ee5e6b4b0d3255bfef95601890afd80709", "", MessageDigests.sha1());
         assertHash("a9993e364706816aba3e25717850c26c9cd0d89d", "abc", MessageDigests.sha1());
-        assertHash("84983e441c3bd26ebaae4aa1f95129e5e54670f1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha1());
-        assertHash("34aa973cd4c4daa4f61eeb2bdbad27316534016f", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha1());
+        assertHash("84983e441c3bd26ebaae4aa1f95129e5e54670f1",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha1());
+        assertHash("34aa973cd4c4daa4f61eeb2bdbad27316534016f",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha1());
         assertHash("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12", "The quick brown fox jumps over the lazy dog", MessageDigests.sha1());
         assertHash("de9f2c7fd25e1b3afad3e85a0bd17d9b100db4b3", "The quick brown fox jumps over the lazy cog", MessageDigests.sha1());
     }
@@ -52,10 +56,14 @@ public void testSha1() throws Exception {
     public void testSha256() throws Exception {
         assertHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "", MessageDigests.sha256());
         assertHash("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc", MessageDigests.sha256());
-        assertHash("248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha256());
-        assertHash("cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha256());
-        assertHash("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592", "The quick brown fox jumps over the lazy dog", MessageDigests.sha256());
-        assertHash("e4c4d8f3bf76b692de791a173e05321150f7a345b46484fe427f6acc7ecc81be", "The quick brown fox jumps over the lazy cog", MessageDigests.sha256());
+        assertHash("248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1",
+            "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha256());
+        assertHash("cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0",
+            new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha256());
+        assertHash("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592",
+            "The quick brown fox jumps over the lazy dog", MessageDigests.sha256());
+        assertHash("e4c4d8f3bf76b692de791a173e05321150f7a345b46484fe427f6acc7ecc81be",
+            "The quick brown fox jumps over the lazy cog", MessageDigests.sha256());
     }
 
     public void testToHexString() throws Exception {
diff --git a/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java b/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
index 0b00353f98ab5..60609dc29930f 100644
--- a/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/network/CidrsTests.java
@@ -78,7 +78,8 @@ public void testSplittingDot() {
     public void testValidSpecificCases() {
         List<Tuple<String, long[]>> cases = new ArrayList<>();
         cases.add(new Tuple<>("192.168.0.0/24", new long[]{(192L << 24) + (168 << 16), (192L << 24) + (168 << 16) + (1 << 8)}));
-        cases.add(new Tuple<>("192.168.128.0/17", new long[]{(192L << 24) + (168 << 16) + (128 << 8), (192L << 24) + (168 << 16) + (128 << 8) + (1 << 15)}));
+        cases.add(new Tuple<>("192.168.128.0/17",
+            new long[]{(192L << 24) + (168 << 16) + (128 << 8), (192L << 24) + (168 << 16) + (128 << 8) + (1 << 15)}));
         cases.add(new Tuple<>("128.0.0.0/1", new long[]{128L << 24, (128L << 24) + (1L << 31)})); // edge case
         cases.add(new Tuple<>("0.0.0.0/0", new long[]{0, 1L << 32})); // edge case
         cases.add(new Tuple<>("0.0.0.0/1", new long[]{0, 1L << 31})); // edge case
diff --git a/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java b/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
index eafb7c69b8d9d..13480122d2fd8 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/common/unit/DistanceUnitTests.java
@@ -53,8 +53,10 @@ public void testDistanceUnitParsing() {
         double testValue = 12345.678;
         for (DistanceUnit unit : DistanceUnit.values()) {
             assertThat("Unit can be parsed from '" + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
-            assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.fromString(unit.toString()), equalTo(unit));
-            assertThat("Value can be parsed from '" + testValue + unit.toString() + "'", DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
+            assertThat("Unit can be parsed from '" + testValue + unit.toString() + "'",
+                DistanceUnit.fromString(unit.toString()), equalTo(unit));
+            assertThat("Value can be parsed from '" + testValue + unit.toString() + "'",
+                DistanceUnit.Distance.parseDistance(unit.toString(testValue)).value, equalTo(testValue));
         }
     }
 
diff --git a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
index 520f80fecac44..026c9a2e078a4 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
+++ b/server/src/test/java/org/elasticsearch/common/unit/FuzzinessTests.java
@@ -67,7 +67,8 @@ public void testParseFromXContent() throws IOException {
                 try (XContentParser parser = createParser(json)) {
                     assertThat(parser.nextToken(), equalTo(XContentParser.Token.START_OBJECT));
                     assertThat(parser.nextToken(), equalTo(XContentParser.Token.FIELD_NAME));
-                    assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER), equalTo(XContentParser.Token.VALUE_STRING)));
+                    assertThat(parser.nextToken(), anyOf(equalTo(XContentParser.Token.VALUE_NUMBER),
+                        equalTo(XContentParser.Token.VALUE_STRING)));
                     Fuzziness fuzziness = Fuzziness.parse(parser);
                     if (value.intValue() >= 1) {
                         assertThat(fuzziness.asDistance(), equalTo(Math.min(2, value.intValue())));
diff --git a/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java b/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
index 9210565a10482..0c1c5bbbcb74e 100644
--- a/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/LongObjectHashMapTests.java
@@ -32,7 +32,8 @@ private BigArrays randombigArrays() {
 
     public void testDuel() {
         final LongObjectHashMap<Object> map1 = new LongObjectHashMap<>();
-        final LongObjectPagedHashMap<Object> map2 = new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays());
+        final LongObjectPagedHashMap<Object> map2 =
+            new LongObjectPagedHashMap<>(randomInt(42), 0.6f + randomFloat() * 0.39f, randombigArrays());
         final int maxKey = randomIntBetween(1, 10000);
         final int iters = scaledRandomIntBetween(10000, 100000);
         for (int i = 0; i < iters; ++i) {
diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
index a0fdcbf51ca1d..ff916c91613dc 100644
--- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java
@@ -172,7 +172,8 @@ public void testScaleUp() throws Exception {
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);
 
         ThreadPoolExecutor pool =
-                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"), threadContext);
+                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), randomTimeUnit(),
+                    EsExecutors.daemonThreadFactory("test"), threadContext);
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
 
@@ -209,7 +210,8 @@ public void testScaleDown() throws Exception {
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);
 
         final ThreadPoolExecutor pool =
-                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"), threadContext);
+                EsExecutors.newScaling(getClass().getName() + "/" + getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS,
+                    EsExecutors.daemonThreadFactory("test"), threadContext);
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
 
diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
index 1eacb4cb18cee..fa3868ec46f37 100644
--- a/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java
@@ -159,7 +159,8 @@ public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
     }
 
     public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()), holder, null);
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(),
+            EsExecutors.daemonThreadFactory(getTestName()), holder, null);
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
index 07338d9286b70..a281d453e0764 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java
@@ -123,7 +123,8 @@ public void testRaw() throws IOException {
             xContentBuilder.rawField("foo", new BytesArray("{\"test\":\"value\"}").streamInput());
             xContentBuilder.field("test1", "value1");
             xContentBuilder.endObject();
-            assertThat(Strings.toString(xContentBuilder), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+            assertThat(Strings.toString(xContentBuilder),
+                equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
         }
         {
             XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON);
@@ -133,7 +134,8 @@ public void testRaw() throws IOException {
             xContentBuilder.rawField("foo1", new BytesArray("{\"test\":\"value\"}").streamInput());
             xContentBuilder.field("test1", "value1");
             xContentBuilder.endObject();
-            assertThat(Strings.toString(xContentBuilder), equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
+            assertThat(Strings.toString(xContentBuilder),
+                equalTo("{\"test\":\"value\",\"foo\":{\"test\":\"value\"},\"foo1\":{\"test\":\"value\"},\"test1\":\"value1\"}"));
         }
     }
 
diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
index b4d7cb11529b3..b0536fa908cf0 100644
--- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
+++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java
@@ -34,7 +34,8 @@ public class FilterPathGeneratorFilteringTests extends ESTestCase {
     private final JsonFactory JSON_FACTORY = new JsonFactory();
 
     public void testInclusiveFilters() throws Exception {
-        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
+        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
 
         assertResult(SAMPLE, "a", true, "{'a':0}");
         assertResult(SAMPLE, "b", true, "{'b':true}");
@@ -79,48 +80,80 @@ public void testInclusiveFilters() throws Exception {
     }
 
     public void testExclusiveFilters() throws Exception {
-        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
-
-        assertResult(SAMPLE, "a", false, "{'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "b", false, "{'a':0,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "c", false, "{'a':0,'b':true,'d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "d", false, "{'a':0,'b':true,'c':'c_value','e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        final String SAMPLE = "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}";
+
+        assertResult(SAMPLE, "a", false, "{'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "b", false, "{'a':0,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "c", false, "{'a':0,'b':true,'d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "d", false, "{'a':0,'b':true,'c':'c_value','e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
         assertResult(SAMPLE, "e", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "h", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "z", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-
-        assertResult(SAMPLE, "e.f1", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.f2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.f*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-        assertResult(SAMPLE, "e.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
-
-        assertResult(SAMPLE, "h.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "h.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "*.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "*.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.*.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.*.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "h.i.j.k.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "h.*.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-        assertResult(SAMPLE, "**.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'},{'g1':'g1_value','g2':'g2_value'}]}");
-
-        assertResult(SAMPLE, "**.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "h", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "z", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+
+        assertResult(SAMPLE, "e.f1", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.f2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.f*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'g1':'g1_value','g2':'g2_value'}]," +
+            "'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+        assertResult(SAMPLE, "e.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'},{'g1':'g1_value'}]," +
+            "'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
+
+        assertResult(SAMPLE, "h.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "h.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "*.i", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*.k", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "*.i.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.*.j.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.*.k.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "h.i.j.k.*", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "h.*.j.*.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+        assertResult(SAMPLE, "**.l", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value','f2':'f2_value'}," +
+            "{'g1':'g1_value','g2':'g2_value'}]}");
+
+        assertResult(SAMPLE, "**.*2", false, "{'a':0,'b':true,'c':'c_value','d':[0,1,2],'e':[{'f1':'f1_value'}," +
+            "{'g1':'g1_value'}],'h':{'i':{'j':{'k':{'l':'l_value'}}}}}");
 
     }
 

From 43f6ba1c6305a722ad90f30f5ea6ba41d6de7fb5 Mon Sep 17 00:00:00 2001
From: Jason Tedor <jason@tedor.me>
Date: Fri, 26 Oct 2018 11:09:55 -0400
Subject: [PATCH 6/9] Fix put/resume follow request parsing (#34913)

This commit adds some fields that were missing from put follow, and
fixes a bug in resume follow.
---
 .../xpack/ccr/action/ShardFollowTask.java              | 10 ++++++----
 .../xpack/core/ccr/action/PutFollowAction.java         | 10 +++++++++-
 2 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java
index f22fe0d2238ac..dc7194aa4e0e5 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java
@@ -73,14 +73,16 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams {
         PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_READ_REQUEST_OPERATION_COUNT);
         PARSER.declareField(
                 ConstructingObjectParser.constructorArg(),
-                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()), MAX_READ_REQUEST_SIZE,
+                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()),
+                MAX_READ_REQUEST_SIZE,
                 ObjectParser.ValueType.STRING);
         PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_READ_REQUESTS);
         PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_REQUEST_OPERATION_COUNT);
         PARSER.declareField(
-            ConstructingObjectParser.constructorArg(),
-            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_BUFFER_SIZE.getPreferredName()), MAX_WRITE_REQUEST_SIZE,
-            ObjectParser.ValueType.STRING);
+                ConstructingObjectParser.constructorArg(),
+                (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()),
+                MAX_WRITE_REQUEST_SIZE,
+                ObjectParser.ValueType.STRING);
         PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_OUTSTANDING_WRITE_REQUESTS);
         PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_COUNT);
         PARSER.declareField(
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
index b242b8cc8ec4c..5273ee036554e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
@@ -34,6 +34,8 @@
 import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_RETRY_DELAY_FIELD;
 import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_COUNT;
 import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_SIZE;
+import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_REQUEST_OPERATION_COUNT;
+import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_REQUEST_SIZE;
 import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.READ_POLL_TIMEOUT;
 
 public final class PutFollowAction extends Action {
@@ -66,12 +68,18 @@ public static class Request extends AcknowledgedRequest implements Indi
             PARSER.declareString(Request::setLeaderIndex, LEADER_INDEX_FIELD);
             PARSER.declareString((req, val) -> req.followRequest.setFollowerIndex(val), FOLLOWER_INDEX_FIELD);
             PARSER.declareInt((req, val) -> req.followRequest.setMaxReadRequestOperationCount(val), MAX_READ_REQUEST_OPERATION_COUNT);
-            PARSER.declareInt((req, val) -> req.followRequest.setMaxOutstandingReadRequests(val), MAX_OUTSTANDING_READ_REQUESTS);
             PARSER.declareField(
                 (req, val) -> req.followRequest.setMaxReadRequestSize(val),
                 (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_READ_REQUEST_SIZE.getPreferredName()),
                 MAX_READ_REQUEST_SIZE,
                 ObjectParser.ValueType.STRING);
+            PARSER.declareInt((req, val) -> req.followRequest.setMaxOutstandingReadRequests(val), MAX_OUTSTANDING_READ_REQUESTS);
+            PARSER.declareInt((req, val) -> req.followRequest.setMaxWriteRequestOperationCount(val), MAX_WRITE_REQUEST_OPERATION_COUNT);
+            PARSER.declareField(
+                    (req, val) -> req.followRequest.setMaxWriteRequestSize(val),
+                    (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_WRITE_REQUEST_SIZE.getPreferredName()),
+                    MAX_WRITE_REQUEST_SIZE,
+                    ObjectParser.ValueType.STRING);
             PARSER.declareInt((req, val) -> req.followRequest.setMaxOutstandingWriteRequests(val), MAX_OUTSTANDING_WRITE_REQUESTS);
             PARSER.declareInt((req, val) -> req.followRequest.setMaxWriteBufferCount(val), MAX_WRITE_BUFFER_COUNT);
             PARSER.declareField(

From 5a7b8c0e697d12f7a96f72f2160e4b286266b263 Mon Sep 17 00:00:00 2001
From: Costin Leau 
Date: Fri, 26 Oct 2018 18:21:14 +0300
Subject: [PATCH 7/9] SQL: Provide null-safe scripts for Not and Neg (#34877)

Introduce null-safe Painless scripts for Not and Neg
Simplify script generation for Unary functions

Close #34848
---
 .../sql/expression/function/scalar/Cast.java  |  9 +--
 .../function/scalar/UnaryScalarFunction.java  | 12 ++++
 .../scalar/datetime/DateTimeFunction.java     | 12 ++--
 .../datetime/NamedDateTimeFunction.java       |  8 +--
 .../function/scalar/datetime/Quarter.java     |  8 +--
 .../function/scalar/math/MathFunction.java    |  8 +--
 .../scalar/string/UnaryStringFunction.java    |  8 +--
 .../scalar/string/UnaryStringIntFunction.java | 18 +++---
 .../whitelist/InternalSqlScriptUtils.java     | 27 +++++++--
 .../sql/expression/gen/script/Scripts.java    | 12 ++--
 .../sql/expression/predicate/IsNotNull.java   | 32 ++++------
 .../predicate/IsNotNullProcessor.java         | 51 ++++++++++++++++
 .../logical/BinaryLogicProcessor.java         |  4 +-
 .../sql/expression/predicate/logical/Not.java | 19 +++---
 .../predicate/logical/NotProcessor.java       | 60 +++++++++++++++++++
 .../predicate/operator/arithmetic/Neg.java    | 14 ++---
 .../xpack/sql/planner/QueryTranslator.java    | 43 ++++++++-----
 .../xpack/sql/plugin/sql_whitelist.txt        |  5 +-
 x-pack/qa/sql/src/main/resources/agg.sql-spec |  7 +++
 19 files changed, 250 insertions(+), 107 deletions(-)
 create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java
 create mode 100644 x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java

diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java
index a8dfe43174911..298039640446e 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/Cast.java
@@ -6,9 +6,7 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
 import org.elasticsearch.xpack.sql.type.DataType;
@@ -71,9 +69,8 @@ protected TypeResolution resolveType() {
     }
 
     @Override
-    protected Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()),
-                new CastProcessor(DataTypeConversion.conversionFor(from(), to())));
+    protected Processor makeProcessor() {
+        return new CastProcessor(DataTypeConversion.conversionFor(from(), to()));
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java
index 54fe2e834db02..1b639287a1289 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/UnaryScalarFunction.java
@@ -6,6 +6,10 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
+import org.elasticsearch.xpack.sql.expression.Expressions;
+import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
+import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
 
@@ -34,12 +38,20 @@ public final UnaryScalarFunction replaceChildren(List newChildren) {
         }
         return replaceChild(newChildren.get(0));
     }
+
     protected abstract UnaryScalarFunction replaceChild(Expression newChild);
 
     public Expression field() {
         return field;
     }
 
+    @Override
+    public final Pipe makePipe() {
+        return new UnaryPipe(location(), this, Expressions.pipe(field()), makeProcessor());
+    }
+
+    protected abstract Processor makeProcessor();
+
     @Override
     public boolean foldable() {
         return field.foldable();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java
index fbb095f2f00b5..8d5a384b1f456 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/DateTimeFunction.java
@@ -6,11 +6,9 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor.DateTimeExtractor;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
@@ -63,13 +61,13 @@ public ScriptTemplate scriptWithField(FieldAttribute field) {
      */
     protected abstract ChronoField chronoField();
 
+    protected abstract DateTimeExtractor extractor();
+
     @Override
-    protected Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new DateTimeProcessor(extractor(), timeZone()));
+    protected Processor makeProcessor() {
+        return new DateTimeProcessor(extractor(), timeZone());
     }
 
-    protected abstract DateTimeExtractor extractor();
-
     @Override
     public DataType dataType() {
         return DataType.INTEGER;
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java
index ed43996fe8e33..a8e6e02057a22 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/NamedDateTimeFunction.java
@@ -6,11 +6,9 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.NamedDateTimeProcessor.NameExtractor;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.type.DataType;
@@ -51,8 +49,8 @@ public ScriptTemplate scriptWithField(FieldAttribute field) {
     }
 
     @Override
-    protected final Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new NamedDateTimeProcessor(nameExtractor, timeZone()));
+    protected Processor makeProcessor() {
+        return new NamedDateTimeProcessor(nameExtractor, timeZone());
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java
index c9a1d4ee721aa..51b9501c6eb00 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/datetime/Quarter.java
@@ -7,10 +7,8 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.datetime;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.tree.NodeInfo.NodeCtor2;
@@ -53,8 +51,8 @@ protected Quarter replaceChild(Expression newChild) {
     }
 
     @Override
-    protected Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new QuarterProcessor(timeZone()));
+    protected Processor makeProcessor() {
+        return new QuarterProcessor(timeZone());
     }
 
     @Override
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java
index 4b7ed1e74429a..cd37e539bfcd9 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/math/MathFunction.java
@@ -6,11 +6,9 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.math;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
 import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor.MathOperation;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.type.DataType;
 
@@ -64,8 +62,8 @@ protected TypeResolution resolveType() {
     }
 
     @Override
-    protected final Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new MathProcessor(operation()));
+    protected Processor makeProcessor() {
+        return new MathProcessor(operation());
     }
 
     protected abstract MathOperation operation();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java
index d387fe7e4a1bf..af9bd05fd15cc 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringFunction.java
@@ -6,12 +6,10 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.string;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.util.StringUtils;
@@ -49,8 +47,8 @@ protected TypeResolution resolveType() {
     }
 
     @Override
-    protected final Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new StringProcessor(operation()));
+    protected Processor makeProcessor() {
+        return new StringProcessor(operation());
     }
 
     protected abstract StringOperation operation();
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java
index 613b37dd7e8b5..0753af03f147f 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/UnaryStringIntFunction.java
@@ -6,12 +6,10 @@
 package org.elasticsearch.xpack.sql.expression.function.scalar.string;
 
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
 import org.elasticsearch.xpack.sql.tree.Location;
 
@@ -51,8 +49,8 @@ protected TypeResolution resolveType() {
     }
 
     @Override
-    protected final Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new StringProcessor(operation()));
+    protected Processor makeProcessor() {
+        return new StringProcessor(operation());
     }
 
     protected abstract StringOperation operation();
@@ -72,6 +70,11 @@ public String processScript(String template) {
                         template));
     }
 
+    @Override
+    public int hashCode() {
+        return Objects.hash(field());
+    }
+
     @Override
     public boolean equals(Object obj) {
         if (obj == null || obj.getClass() != getClass()) {
@@ -80,9 +83,4 @@ public boolean equals(Object obj) {
         UnaryStringIntFunction other = (UnaryStringIntFunction) obj;
         return Objects.equals(other.field(), field());
     }
-
-    @Override
-    public int hashCode() {
-        return Objects.hash(field());
-    }
 }
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
index c6f445c0590e6..9aabb3f10ecdc 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/whitelist/InternalSqlScriptUtils.java
@@ -21,8 +21,11 @@
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.ReplaceFunctionProcessor;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.StringProcessor.StringOperation;
 import org.elasticsearch.xpack.sql.expression.function.scalar.string.SubstringFunctionProcessor;
+import org.elasticsearch.xpack.sql.expression.predicate.IsNotNullProcessor;
 import org.elasticsearch.xpack.sql.expression.predicate.logical.BinaryLogicProcessor.BinaryLogicOperation;
+import org.elasticsearch.xpack.sql.expression.predicate.logical.NotProcessor;
 import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.BinaryArithmeticProcessor.BinaryArithmeticOperation;
+import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation;
 import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparisonProcessor.BinaryComparisonOperation;
 import org.elasticsearch.xpack.sql.expression.predicate.regex.RegexProcessor.RegexOperation;
 import org.elasticsearch.xpack.sql.util.StringUtils;
@@ -102,6 +105,14 @@ public static Boolean or(Boolean left, Boolean right) {
         return BinaryLogicOperation.OR.apply(left, right);
     }
 
+    public static Boolean not(Boolean expression) {
+        return NotProcessor.apply(expression);
+    }
+
+    public static Boolean notNull(Object expression) {
+        return IsNotNullProcessor.apply(expression);
+    }
+
     //
     // Regex
     //
@@ -116,20 +127,24 @@ public static Number add(Number left, Number right) {
         return BinaryArithmeticOperation.ADD.apply(left, right);
     }
 
-    public static Number sub(Number left, Number right) {
-        return BinaryArithmeticOperation.SUB.apply(left, right);
+    public static Number div(Number left, Number right) {
+        return BinaryArithmeticOperation.DIV.apply(left, right);
+    }
+
+    public static Number mod(Number left, Number right) {
+        return BinaryArithmeticOperation.MOD.apply(left, right);
     }
 
     public static Number mul(Number left, Number right) {
         return BinaryArithmeticOperation.MUL.apply(left, right);
     }
 
-    public static Number div(Number left, Number right) {
-        return BinaryArithmeticOperation.DIV.apply(left, right);
+    public static Number neg(Number value) {
+        return UnaryArithmeticOperation.NEGATE.apply(value);
     }
 
-    public static Number mod(Number left, Number right) {
-        return BinaryArithmeticOperation.MOD.apply(left, right);
+    public static Number sub(Number left, Number right) {
+        return BinaryArithmeticOperation.SUB.apply(left, right);
     }
 
     public static Number round(Number v, Number s) {
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java
index 69ad3661dc5bb..f9e2588a9c035 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/gen/script/Scripts.java
@@ -24,12 +24,16 @@
 
 public final class Scripts {
 
+    public static final String DOC_VALUE = "doc[{}].value";
+    public static final String SQL_SCRIPTS = "{sql}";
+    public static final String PARAM = "{}";
+
     private Scripts() {}
 
     private static final Map FORMATTING_PATTERNS = Collections.unmodifiableMap(Stream.of(
-            new SimpleEntry<>("doc[{}].value", "{sql}.docValue(doc,{})"),
-            new SimpleEntry<>("{sql}", InternalSqlScriptUtils.class.getSimpleName()),
-            new SimpleEntry<>("{}", "params.%s"))
+            new SimpleEntry<>(DOC_VALUE, SQL_SCRIPTS + ".docValue(doc,{})"),
+            new SimpleEntry<>(SQL_SCRIPTS, InternalSqlScriptUtils.class.getSimpleName()),
+            new SimpleEntry<>(PARAM, "params.%s"))
             .collect(toMap(e -> Pattern.compile(e.getKey(), Pattern.LITERAL), Map.Entry::getValue, (a, b) -> a, LinkedHashMap::new)));
 
     /**
@@ -83,4 +87,4 @@ public static ScriptTemplate binaryMethod(String methodName, ScriptTemplate left
                     .build(),
                 dataType);
     }
-}
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java
index cabca2aaf2dd6..bd3fd5bf0811b 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNull.java
@@ -5,25 +5,24 @@
  */
 package org.elasticsearch.xpack.sql.expression.predicate;
 
-import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
 import org.elasticsearch.xpack.sql.expression.Expression;
-import org.elasticsearch.xpack.sql.expression.UnaryExpression;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
+import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+import org.elasticsearch.xpack.sql.expression.gen.script.Scripts;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
 import org.elasticsearch.xpack.sql.type.DataType;
 import org.elasticsearch.xpack.sql.type.DataTypes;
 
-public class IsNotNull extends UnaryExpression {
+public class IsNotNull extends UnaryScalarFunction {
 
-    public IsNotNull(Location location, Expression child) {
-        super(location, child);
+    public IsNotNull(Location location, Expression field) {
+        super(location, field);
     }
 
     @Override
     protected NodeInfo info() {
-        return NodeInfo.create(this, IsNotNull::new, child());
+        return NodeInfo.create(this, IsNotNull::new, field());
     }
 
     @Override
@@ -33,17 +32,17 @@ protected IsNotNull replaceChild(Expression newChild) {
 
     @Override
     public Object fold() {
-        return child().fold() != null && !DataTypes.isNull(child().dataType());
+        return field().fold() != null && !DataTypes.isNull(field().dataType());
     }
 
     @Override
-    protected Pipe makePipe() {
-        throw new SqlIllegalArgumentException("Not supported yet");
+    protected Processor makeProcessor() {
+        return IsNotNullProcessor.INSTANCE;
     }
 
     @Override
-    public ScriptTemplate asScript() {
-        throw new SqlIllegalArgumentException("Not supported yet");
+    public String processScript(String script) {
+        return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".notNull(" + script + ")");
     }
 
     @Override
@@ -55,9 +54,4 @@ public boolean nullable() {
     public DataType dataType() {
         return DataType.BOOLEAN;
     }
-
-    @Override
-    public String toString() {
-        return child().toString() + " IS NOT NULL";
-    }
-}
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java
new file mode 100644
index 0000000000000..b29ae263f3907
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/IsNotNullProcessor.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression.predicate;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+
+import java.io.IOException;
+
+public class IsNotNullProcessor implements Processor {
+
+    static final IsNotNullProcessor INSTANCE = new IsNotNullProcessor();
+
+    public static final String NAME = "inn";
+
+    private IsNotNullProcessor() {}
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {}
+
+    @Override
+    public Object process(Object input) {
+        return apply(input);
+    }
+
+    public static Boolean apply(Object input) {
+        return input != null ? Boolean.TRUE : Boolean.FALSE;
+    }
+
+    @Override
+    public int hashCode() {
+        return IsNotNullProcessor.class.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        return obj != null && getClass() == obj.getClass();
+    }
+}
\ No newline at end of file
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java
index 7e3aef2b8c721..334a80b7d578d 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/BinaryLogicProcessor.java
@@ -26,7 +26,7 @@ public enum BinaryLogicOperation implements PredicateBiFunction {
             if (Boolean.TRUE.equals(l) || Boolean.TRUE.equals(r)) {
@@ -35,7 +35,7 @@ public enum BinaryLogicOperation implements PredicateBiFunction process;
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java
index 48a307fa0621a..55115ffb4df11 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/Not.java
@@ -5,19 +5,16 @@
  */
 package org.elasticsearch.xpack.sql.expression.predicate.logical;
 
-import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
 import org.elasticsearch.xpack.sql.expression.Expression;
 import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+import org.elasticsearch.xpack.sql.expression.gen.script.Scripts;
 import org.elasticsearch.xpack.sql.expression.predicate.BinaryOperator.Negateable;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
 import org.elasticsearch.xpack.sql.type.DataType;
 
-import java.util.Objects;
-
 public class Not extends UnaryScalarFunction {
 
     public Not(Location location, Expression child) {
@@ -45,17 +42,17 @@ protected TypeResolution resolveType() {
 
     @Override
     public Object fold() {
-        return Objects.equals(field().fold(), Boolean.TRUE) ? Boolean.FALSE : Boolean.TRUE;
+        return NotProcessor.INSTANCE.process(field().fold());
     }
 
     @Override
-    protected Pipe makePipe() {
-        throw new SqlIllegalArgumentException("Not supported yet");
+    protected Processor makeProcessor() {
+        return NotProcessor.INSTANCE;
     }
 
     @Override
-    public ScriptTemplate asScript() {
-        throw new SqlIllegalArgumentException("Not supported yet");
+    public String processScript(String script) {
+        return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".not(" + script + ")");
     }
 
     @Override
@@ -71,4 +68,4 @@ protected Expression canonicalize() {
     public DataType dataType() {
         return DataType.BOOLEAN;
     }
-}
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java
new file mode 100644
index 0000000000000..14425d35578ac
--- /dev/null
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/logical/NotProcessor.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.sql.expression.predicate.logical;
+
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
+
+import java.io.IOException;
+
+public class NotProcessor implements Processor {
+
+    static final NotProcessor INSTANCE = new NotProcessor();
+
+    public static final String NAME = "ln";
+
+    private NotProcessor() {}
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {}
+
+    @Override
+    public Object process(Object input) {
+        return apply(input);
+    }
+
+    public static Boolean apply(Object input) {
+        if (input == null) {
+            return null;
+        }
+
+        if (!(input instanceof Boolean)) {
+            throw new SqlIllegalArgumentException("A boolean is required; received {}", input);
+        }
+
+        return ((Boolean) input).booleanValue() ? Boolean.FALSE : Boolean.TRUE;
+    }
+
+    @Override
+    public int hashCode() {
+        return NotProcessor.class.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        return obj != null && getClass() == obj.getClass();
+    }
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java
index c5758b787f0ff..47ea773f514fa 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java
@@ -9,9 +9,9 @@
 import org.elasticsearch.xpack.sql.expression.Expressions;
 import org.elasticsearch.xpack.sql.expression.NamedExpression;
 import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe;
-import org.elasticsearch.xpack.sql.expression.gen.pipeline.UnaryPipe;
+import org.elasticsearch.xpack.sql.expression.gen.processor.Processor;
 import org.elasticsearch.xpack.sql.expression.gen.script.ScriptWeaver;
+import org.elasticsearch.xpack.sql.expression.gen.script.Scripts;
 import org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation;
 import org.elasticsearch.xpack.sql.tree.Location;
 import org.elasticsearch.xpack.sql.tree.NodeInfo;
@@ -57,12 +57,12 @@ public String name() {
     }
 
     @Override
-    public String processScript(String template) {
-        return super.processScript("-" + template);
+    public String processScript(String script) {
+        return Scripts.formatTemplate(Scripts.SQL_SCRIPTS + ".neg(" + script + ")");
     }
 
     @Override
-    protected Pipe makePipe() {
-        return new UnaryPipe(location(), this, Expressions.pipe(field()), new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE));
+    protected Processor makeProcessor() {
+        return new UnaryArithmeticProcessor(UnaryArithmeticOperation.NEGATE);
     }
-}
+}
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
index 453660f07da8a..9fcd542ef631d 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.xpack.sql.expression.FieldAttribute;
 import org.elasticsearch.xpack.sql.expression.Literal;
 import org.elasticsearch.xpack.sql.expression.NamedExpression;
-import org.elasticsearch.xpack.sql.expression.UnaryExpression;
 import org.elasticsearch.xpack.sql.expression.function.Function;
 import org.elasticsearch.xpack.sql.expression.function.Functions;
 import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction;
@@ -354,11 +353,6 @@ static Query or(Location loc, Query left, Query right) {
         return new BoolQuery(loc, false, left, right);
     }
 
-    static Query not(Query query) {
-        Check.isTrue(query != null, "Expressions is null");
-        return new NotQuery(query.location(), query);
-    }
-
     static String nameOf(Expression e) {
         if (e instanceof DateTimeFunction) {
             return nameOf(((DateTimeFunction) e).field());
@@ -484,20 +478,41 @@ static class Nots extends ExpressionTranslator {
 
         @Override
         protected QueryTranslation asQuery(Not not, boolean onAggs) {
-            QueryTranslation translation = toQuery(not.field(), onAggs);
-            return new QueryTranslation(not(translation.query), translation.aggFilter);
+            Query query = null;
+            AggFilter aggFilter = null;
+
+            if (onAggs) {
+                aggFilter = new AggFilter(not.id().toString(), not.asScript());
+            } else {
+                query = new NotQuery(not.location(), toQuery(not.field(), false).query);
+                // query directly on the field
+                if (not.field() instanceof FieldAttribute) {
+                    query = wrapIfNested(query, not.field());
+                }
+            }
+
+            return new QueryTranslation(query, aggFilter);
         }
     }
 
-    static class Nulls extends ExpressionTranslator {
+    static class Nulls extends ExpressionTranslator {
 
         @Override
-        protected QueryTranslation asQuery(UnaryExpression ue, boolean onAggs) {
-            // TODO: handle onAggs - missing bucket aggregation
-            if (ue instanceof IsNotNull) {
-                return new QueryTranslation(new ExistsQuery(ue.location(), nameOf(ue.child())));
+        protected QueryTranslation asQuery(IsNotNull inn, boolean onAggs) {
+            Query query = null;
+            AggFilter aggFilter = null;
+
+            if (onAggs) {
+                aggFilter = new AggFilter(inn.id().toString(), inn.asScript());
+            } else {
+                query = new ExistsQuery(inn.location(), nameOf(inn.field()));
+                // query directly on the field
+                if (inn.field() instanceof NamedExpression) {
+                    query = wrapIfNested(query, inn.field());
+                }
             }
-            return null;
+
+            return new QueryTranslation(query, aggFilter);
         }
     }
 
diff --git a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
index ea229940193a3..998dab84783f0 100644
--- a/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
+++ b/x-pack/plugin/sql/src/main/resources/org/elasticsearch/xpack/sql/plugin/sql_whitelist.txt
@@ -29,7 +29,9 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS
 # Logical
 #
   Boolean and(Boolean, Boolean)
-  Boolean or(Boolean, Boolean)    
+  Boolean or(Boolean, Boolean)
+  Boolean not(Boolean)
+  Boolean notNull(Object)
 
 #
 # Regex
@@ -43,6 +45,7 @@ class org.elasticsearch.xpack.sql.expression.function.scalar.whitelist.InternalS
   Number div(Number, Number)
   Number mod(Number, Number)
   Number mul(Number, Number)
+  Number neg(Number)
   Number sub(Number, Number)
   Number round(Number, Number)
   Number truncate(Number, Number)
diff --git a/x-pack/qa/sql/src/main/resources/agg.sql-spec b/x-pack/qa/sql/src/main/resources/agg.sql-spec
index dab4c386a55ba..2fafb75d69bb5 100644
--- a/x-pack/qa/sql/src/main/resources/agg.sql-spec
+++ b/x-pack/qa/sql/src/main/resources/agg.sql-spec
@@ -456,6 +456,13 @@ selectHireDateGroupByHireDate
 SELECT hire_date HD, COUNT(*) c FROM test_emp GROUP BY hire_date ORDER BY hire_date DESC;
 selectSalaryGroupBySalary
 SELECT salary, COUNT(*) c FROM test_emp GROUP BY salary ORDER BY salary DESC;
+selectLangGroupByLangHavingCountIsNotNull
+SELECT languages, COUNT(*) c FROM test_emp GROUP BY languages HAVING COUNT(*) IS NOT NULL ORDER BY languages DESC;
+selectLangGroupByLangHavingNotEquality
+SELECT languages, COUNT(*) c FROM test_emp GROUP BY languages HAVING NOT COUNT(*) = 1 ORDER BY languages DESC;
+selectLangGroupByLangHavingDifferent
+SELECT languages, COUNT(*) c FROM test_emp GROUP BY languages HAVING COUNT(*) <> 1 ORDER BY languages DESC;
+
 
 // filter with IN
 aggMultiWithHavingUsingInAndNullHandling

From a0279bc069fbf5024a2bd36b93df9fed706700f7 Mon Sep 17 00:00:00 2001
From: Jay Modi 
Date: Fri, 26 Oct 2018 09:21:54 -0600
Subject: [PATCH 8/9] Responses can use Writeable.Reader interface (#34655)

In order to remove Streamable from the codebase, Response objects need
to be read using the Writeable.Reader interface which this change
enables. This change enables the use of Writeable.Reader by adding the
`Action#getResponseReader` method. The default implementation simply
uses the existing `newResponse` method and the readFrom method. As
responses are migrated to the Writeable.Reader interface, Action
classes can be updated to throw an UnsupportedOperationException when
`newResponse` is called and override the `getResponseReader` method.

Relates #34389
---
 .../netty4/Netty4ScheduledPingTests.java      |   3 +-
 .../java/org/elasticsearch/action/Action.java |  15 ++
 .../action/ActionListenerResponseHandler.java |  18 +-
 .../elasticsearch/action/ActionResponse.java  |   7 +
 .../action/TransportActionNodeProxy.java      |   2 +-
 .../tasks/get/TransportGetTaskAction.java     |   7 +-
 .../shards/ClusterSearchShardsAction.java     |   8 +-
 .../shards/ClusterSearchShardsResponse.java   |  58 +++----
 .../TransportClusterSearchShardsAction.java   |   9 +-
 .../action/ingest/IngestActionForwarder.java  |   2 +-
 .../TransportResyncReplicationAction.java     |   8 +-
 .../action/search/MultiSearchResponse.java    |   4 +
 .../action/search/SearchTransportService.java |  35 ++--
 .../broadcast/TransportBroadcastAction.java   |   7 +-
 .../node/TransportBroadcastByNodeAction.java  |   6 +-
 .../master/TransportMasterNodeAction.java     |  44 +++--
 .../support/nodes/TransportNodesAction.java   |   8 +-
 .../TransportReplicationAction.java           |  22 ++-
 ...ransportInstanceSingleOperationAction.java |   8 +-
 .../shard/TransportSingleShardAction.java     |  13 +-
 .../support/tasks/TransportTasksAction.java   |   6 +-
 .../TransportClientNodesService.java          |   6 +-
 .../discovery/zen/MasterFaultDetection.java   |  14 +-
 .../discovery/zen/NodesFaultDetection.java    |  14 +-
 .../gateway/LocalAllocateDangledIndices.java  |   6 +-
 .../indices/flush/SyncedFlushService.java     |  18 +-
 .../recovery/PeerRecoveryTargetService.java   |   7 +-
 .../RecoveryTranslogOperationsResponse.java   |   6 +-
 .../elasticsearch/search/SearchService.java   |   4 +
 .../search/dfs/DfsSearchResult.java           |   4 +
 .../search/fetch/FetchSearchResult.java       |   4 +
 .../search/fetch/QueryFetchSearchResult.java  |   4 +
 .../fetch/ScrollQueryFetchSearchResult.java   |   4 +
 .../search/query/QuerySearchResult.java       |   4 +
 .../search/query/ScrollQuerySearchResult.java |   4 +
 .../EmptyTransportResponseHandler.java        |   3 +-
 .../transport/RemoteClusterAwareClient.java   |   2 +-
 .../transport/RemoteClusterConnection.java    |  10 +-
 .../elasticsearch/transport/TcpTransport.java |  24 ++-
 .../elasticsearch/transport/Transport.java    |  19 ++-
 .../transport/TransportActionProxy.java       |  56 +++---
 .../TransportChannelResponseHandler.java      |  13 +-
 .../transport/TransportMessage.java           |  13 ++
 .../transport/TransportResponse.java          |  19 +++
 .../transport/TransportResponseHandler.java   |  24 ---
 .../transport/TransportService.java           |  24 ++-
 .../ClusterSearchShardsResponseTests.java     |   3 +-
 .../search/TransportSearchActionTests.java    |   2 +-
 .../TransportClientNodesServiceTests.java     |   6 +-
 .../RemoteClusterConnectionTests.java         |  12 +-
 .../transport/TransportActionProxyTests.java  |  42 ++---
 .../test/transport/CapturingTransport.java    |   7 +-
 .../AbstractSimpleTransportTestCase.java      | 160 ++++++++++--------
 ...curityServerTransportInterceptorTests.java |   5 +-
 ...ServerTransportFilterIntegrationTests.java |   9 +-
 55 files changed, 499 insertions(+), 343 deletions(-)

diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
index fae4082e81828..0f3185add0833 100644
--- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
+++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java
@@ -20,6 +20,7 @@
 
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Settings;
@@ -102,7 +103,7 @@ public void testScheduledPing() throws Exception {
                 TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(randomBoolean()).build(),
                 new TransportResponseHandler() {
                     @Override
-                    public TransportResponse.Empty newInstance() {
+                    public TransportResponse.Empty read(StreamInput in) {
                         return TransportResponse.Empty.INSTANCE;
                     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/Action.java b/server/src/main/java/org/elasticsearch/action/Action.java
index 771762ad15c30..f0df6202072a4 100644
--- a/server/src/main/java/org/elasticsearch/action/Action.java
+++ b/server/src/main/java/org/elasticsearch/action/Action.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action;
 
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.transport.TransportRequestOptions;
 
@@ -45,9 +46,23 @@ public String name() {
 
     /**
      * Creates a new response instance.
+     * @deprecated Implement {@link #getResponseReader()} instead and make this method throw an
+     *             {@link UnsupportedOperationException}
      */
+    @Deprecated
     public abstract Response newResponse();
 
+    /**
+     * Get a reader that can create a new instance of the class from a {@link org.elasticsearch.common.io.stream.StreamInput}
+     */
+    public Writeable.Reader getResponseReader() {
+        return in -> {
+            Response response = newResponse();
+            response.readFrom(in);
+            return response;
+        };
+    }
+
     /**
      * Optional request options for the action.
      */
diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
index f258be3a16137..432cef6ad3029 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
@@ -19,13 +19,15 @@
 
 package org.elasticsearch.action;
 
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
 
+import java.io.IOException;
 import java.util.Objects;
-import java.util.function.Supplier;
 
 /**
  * A simple base class for action response listeners, defaulting to using the SAME executor (as its
@@ -34,11 +36,11 @@
 public class ActionListenerResponseHandler implements TransportResponseHandler {
 
     private final ActionListener listener;
-    private final Supplier responseSupplier;
+    private final Writeable.Reader reader;
 
-    public ActionListenerResponseHandler(ActionListener listener, Supplier responseSupplier) {
+    public ActionListenerResponseHandler(ActionListener listener, Writeable.Reader reader) {
         this.listener = Objects.requireNonNull(listener);
-        this.responseSupplier = Objects.requireNonNull(responseSupplier);
+        this.reader = Objects.requireNonNull(reader);
     }
 
     @Override
@@ -52,12 +54,12 @@ public void handleException(TransportException e) {
     }
 
     @Override
-    public Response newInstance() {
-        return responseSupplier.get();
+    public String executor() {
+        return ThreadPool.Names.SAME;
     }
 
     @Override
-    public String executor() {
-        return ThreadPool.Names.SAME;
+    public Response read(StreamInput in) throws IOException {
+        return reader.read(in);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/ActionResponse.java b/server/src/main/java/org/elasticsearch/action/ActionResponse.java
index a1cd3068a269f..dd019ba3f5591 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionResponse.java
@@ -30,6 +30,13 @@
  */
 public abstract class ActionResponse extends TransportResponse {
 
+    public ActionResponse() {
+    }
+
+    public ActionResponse(StreamInput in) throws IOException {
+        super(in);
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
diff --git a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
index c369deb0b10b3..7d8dbd1f975bd 100644
--- a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
+++ b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
@@ -48,6 +48,6 @@ public void execute(final DiscoveryNode node, final Request request, final Actio
             return;
         }
         transportService.sendRequest(node, action.name(), request, transportOptions,
-            new ActionListenerResponseHandler<>(listener, action::newResponse));
+            new ActionListenerResponseHandler<>(listener, action.getResponseReader()));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
index 927d2e47680c5..69fc7ee376c0b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
@@ -119,8 +120,10 @@ private void runOnNodeWithTaskIfPossible(Task thisTask, GetTaskRequest request,
         transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
                 new TransportResponseHandler() {
                     @Override
-                    public GetTaskResponse newInstance() {
-                        return new GetTaskResponse();
+                    public GetTaskResponse read(StreamInput in) throws IOException {
+                        GetTaskResponse response = new GetTaskResponse();
+                        response.readFrom(in);
+                        return response;
                     }
 
                     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
index ec936c623a24a..869aecf095431 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.action.admin.cluster.shards;
 
 import org.elasticsearch.action.Action;
+import org.elasticsearch.common.io.stream.Writeable;
 
 public class ClusterSearchShardsAction extends Action {
 
@@ -32,6 +33,11 @@ private ClusterSearchShardsAction() {
 
     @Override
     public ClusterSearchShardsResponse newResponse() {
-        return new ClusterSearchShardsResponse();
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    public Writeable.Reader getResponseReader() {
+        return ClusterSearchShardsResponse::new;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
index f8d448d0fe11c..57407bd61fb82 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
@@ -38,36 +38,12 @@ public class ClusterSearchShardsResponse extends ActionResponse implements ToXCo
     public static final ClusterSearchShardsResponse EMPTY = new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0],
             new DiscoveryNode[0], Collections.emptyMap());
 
-    private ClusterSearchShardsGroup[] groups;
-    private DiscoveryNode[] nodes;
-    private Map indicesAndFilters;
+    private final ClusterSearchShardsGroup[] groups;
+    private final DiscoveryNode[] nodes;
+    private final Map indicesAndFilters;
 
-    public ClusterSearchShardsResponse() {
-
-    }
-
-    public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes,
-                                       Map indicesAndFilters) {
-        this.groups = groups;
-        this.nodes = nodes;
-        this.indicesAndFilters = indicesAndFilters;
-    }
-
-    public ClusterSearchShardsGroup[] getGroups() {
-        return groups;
-    }
-
-    public DiscoveryNode[] getNodes() {
-        return nodes;
-    }
-
-    public Map getIndicesAndFilters() {
-        return indicesAndFilters;
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
+    public ClusterSearchShardsResponse(StreamInput in) throws IOException {
+        super(in);
         groups = new ClusterSearchShardsGroup[in.readVInt()];
         for (int i = 0; i < groups.length; i++) {
             groups[i] = ClusterSearchShardsGroup.readSearchShardsGroupResponse(in);
@@ -85,6 +61,11 @@ public void readFrom(StreamInput in) throws IOException {
         }
     }
 
+    @Override
+    public void readFrom(StreamInput in) throws IOException {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
@@ -103,6 +84,25 @@ public void writeTo(StreamOutput out) throws IOException {
         }
     }
 
+    public ClusterSearchShardsResponse(ClusterSearchShardsGroup[] groups, DiscoveryNode[] nodes,
+                                       Map indicesAndFilters) {
+        this.groups = groups;
+        this.nodes = nodes;
+        this.indicesAndFilters = indicesAndFilters;
+    }
+
+    public ClusterSearchShardsGroup[] getGroups() {
+        return groups;
+    }
+
+    public DiscoveryNode[] getNodes() {
+        return nodes;
+    }
+
+    public Map getIndicesAndFilters() {
+        return indicesAndFilters;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
index 9774ecdffba17..f4f36ca4d65e9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
@@ -39,6 +40,7 @@
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -72,7 +74,12 @@ protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, C
 
     @Override
     protected ClusterSearchShardsResponse newResponse() {
-        return new ClusterSearchShardsResponse();
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
+
+    @Override
+    protected ClusterSearchShardsResponse read(StreamInput in) throws IOException {
+        return new ClusterSearchShardsResponse(in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java b/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java
index 6f5147c38bdbb..ae5a736bde66d 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/IngestActionForwarder.java
@@ -49,7 +49,7 @@ public IngestActionForwarder(TransportService transportService) {
 
     public void forwardIngestRequest(Action action, ActionRequest request, ActionListener listener) {
         transportService.sendRequest(randomIngestNode(), action.name(), request,
-            new ActionListenerResponseHandler(listener, action::newResponse));
+            new ActionListenerResponseHandler(listener, action.getResponseReader()));
     }
 
     private DiscoveryNode randomIngestNode() {
diff --git a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
index 6d0c35345b1fa..50d75b20dc82b 100644
--- a/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/resync/TransportResyncReplicationAction.java
@@ -32,6 +32,7 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.seqno.SequenceNumbers;
@@ -45,6 +46,7 @@
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.function.Consumer;
 import java.util.function.Supplier;
 
@@ -151,8 +153,10 @@ public void sync(ResyncReplicationRequest request, Task parentTask, String prima
             transportOptions,
             new TransportResponseHandler() {
                 @Override
-                public ResyncReplicationResponse newInstance() {
-                    return newResponseInstance();
+                public ResyncReplicationResponse read(StreamInput in) throws IOException {
+                    ResyncReplicationResponse response = newResponseInstance();
+                    response.readFrom(in);
+                    return response;
                 }
 
                 @Override
diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
index cb30385ecc868..f2b1b0d5c6265 100644
--- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
@@ -135,6 +135,10 @@ public Exception getFailure() {
     MultiSearchResponse() {
     }
 
+    MultiSearchResponse(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public MultiSearchResponse(Item[] items, long tookInMillis) {
         this.items = items;
         this.tookInMillis = tookInMillis;
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
index fd43a948ee5fb..302ed4ccbfec9 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
@@ -29,6 +29,7 @@
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.search.SearchPhaseResult;
@@ -60,7 +61,6 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.function.BiFunction;
-import java.util.function.Supplier;
 
 /**
  * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
@@ -119,7 +119,7 @@ public void sendCanMatch(Transport.Connection connection, final ShardSearchTrans
 
     public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) {
         transportService.sendRequest(connection, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
-            TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
+            TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, (in) -> TransportResponse.Empty.INSTANCE));
     }
 
     public void sendExecuteDfs(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task,
@@ -133,11 +133,11 @@ public void sendExecuteQuery(Transport.Connection connection, final ShardSearchT
         // we optimize this and expect a QueryFetchSearchResult if we only have a single shard in the search request
         // this used to be the QUERY_AND_FETCH which doesn't exist anymore.
         final boolean fetchDocuments = request.numberOfShards() == 1;
-        Supplier supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
+        Writeable.Reader reader = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
 
         final ActionListener handler = responseWrapper.apply(connection, listener);
         transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
-                new ConnectionCountingHandler<>(handler, supplier, clientConnections, connection.getNode().getId()));
+                new ConnectionCountingHandler<>(handler, reader, clientConnections, connection.getNode().getId()));
     }
 
     public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task,
@@ -155,8 +155,8 @@ public void sendExecuteScrollQuery(Transport.Connection connection, final Intern
     public void sendExecuteScrollFetch(Transport.Connection connection, final InternalScrollSearchRequest request, SearchTask task,
                                        final SearchActionListener listener) {
         transportService.sendChildRequest(connection, QUERY_FETCH_SCROLL_ACTION_NAME, request, task,
-                new ConnectionCountingHandler<>(listener, ScrollQueryFetchSearchResult::new,
-                        clientConnections, connection.getNode().getId()));
+                new ConnectionCountingHandler<>(listener, ScrollQueryFetchSearchResult::new, clientConnections,
+                    connection.getNode().getId()));
     }
 
     public void sendExecuteFetch(Transport.Connection connection, final ShardFetchSearchRequest request, SearchTask task,
@@ -279,6 +279,10 @@ public static class SearchFreeContextResponse extends TransportResponse {
         SearchFreeContextResponse() {
         }
 
+        SearchFreeContextResponse(StreamInput in) throws IOException {
+            freed = in.readBoolean();
+        }
+
         SearchFreeContextResponse(boolean freed) {
             this.freed = freed;
         }
@@ -306,22 +310,20 @@ public static void registerRequestHandler(TransportService transportService, Sea
                 boolean freed = searchService.freeContext(request.id());
                 channel.sendResponse(new SearchFreeContextResponse(freed));
         });
-        TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME,
-                (Supplier) SearchFreeContextResponse::new);
+        TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new);
         transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, SearchFreeContextRequest::new,
             (request, channel, task) -> {
                 boolean freed = searchService.freeContext(request.id());
                 channel.sendResponse(new SearchFreeContextResponse(freed));
         });
-        TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME,
-                (Supplier) SearchFreeContextResponse::new);
+        TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new);
         transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE,
             ThreadPool.Names.SAME, (request, channel, task) -> {
                 searchService.freeAllScrollContexts();
                 channel.sendResponse(TransportResponse.Empty.INSTANCE);
         });
         TransportActionProxy.registerProxyAction(transportService, CLEAR_SCROLL_CONTEXTS_ACTION_NAME,
-                () -> TransportResponse.Empty.INSTANCE);
+            (in) -> TransportResponse.Empty.INSTANCE);
 
         transportService.registerRequestHandler(DFS_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new,
             (request, channel, task) -> {
@@ -352,8 +354,8 @@ public void onFailure(Exception e) {
                 searchService.executeQueryPhase(request, (SearchTask) task, new ChannelActionListener<>(
                     channel, QUERY_ACTION_NAME, request));
             });
-        TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME,
-                (request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? QueryFetchSearchResult::new : QuerySearchResult::new);
+        TransportActionProxy.registerProxyActionWithDynamicResponseType(transportService, QUERY_ACTION_NAME,
+            (request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? QueryFetchSearchResult::new : QuerySearchResult::new);
 
         transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, QuerySearchRequest::new,
             (request, channel, task) -> {
@@ -395,8 +397,7 @@ public void onFailure(Exception e) {
             (request, channel, task) -> {
                 searchService.canMatch(request, new ChannelActionListener<>(channel, QUERY_CAN_MATCH_NAME, request));
             });
-        TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME,
-                (Supplier) SearchService.CanMatchResponse::new);
+        TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, SearchService.CanMatchResponse::new);
     }
 
 
@@ -419,9 +420,9 @@ final class ConnectionCountingHandler extend
         private final Map clientConnections;
         private final String nodeId;
 
-        ConnectionCountingHandler(final ActionListener listener, final Supplier responseSupplier,
+        ConnectionCountingHandler(final ActionListener listener, final Writeable.Reader responseReader,
                                   final Map clientConnections, final String nodeId) {
-            super(listener, responseSupplier);
+            super(listener, responseReader);
             this.clientConnections = clientConnections;
             this.nodeId = nodeId;
             // Increment the number of connections for this node by one
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
index 22c4a70b0ea55..27dcb11da3869 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java
@@ -35,6 +35,7 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.tasks.Task;
@@ -173,8 +174,10 @@ protected void performOperation(final ShardIterator shardIt, final ShardRouting
                     } else {
                         transportService.sendRequest(node, transportShardAction, shardRequest, new TransportResponseHandler() {
                             @Override
-                            public ShardResponse newInstance() {
-                                return newShardResponse();
+                            public ShardResponse read(StreamInput in) throws IOException {
+                                ShardResponse response = newShardResponse();
+                                response.readFrom(in);
+                                return response;
                             }
 
                             @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index 9079238b7b62e..f097539626458 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -313,8 +313,10 @@ private void sendNodeRequest(final DiscoveryNode node, List shards
                 }
                 transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new TransportResponseHandler() {
                     @Override
-                    public NodeResponse newInstance() {
-                        return new NodeResponse();
+                    public NodeResponse read(StreamInput in) throws IOException {
+                        NodeResponse nodeResponse = new NodeResponse();
+                        nodeResponse.readFrom(in);
+                        return nodeResponse;
                     }
 
                     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
index a9ed05ac0377f..10780a55c27bf 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java
@@ -35,6 +35,8 @@
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
@@ -47,6 +49,7 @@
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
 
@@ -101,8 +104,21 @@ protected TransportMasterNodeAction(Settings settings, String actionName, boolea
 
     protected abstract String executor();
 
+    /**
+     * @deprecated new implementors should override {@link #read(StreamInput)} and use the
+     *             {@link Writeable.Reader} interface.
+     * @return a new response instance. Typically this is used for serialization using the
+     *         {@link Streamable#readFrom(StreamInput)} method.
+     */
+    @Deprecated
     protected abstract Response newResponse();
 
+    protected Response read(StreamInput in) throws IOException {
+        Response response = newResponse();
+        response.readFrom(in);
+        return response;
+    }
+
     protected abstract void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception;
 
     /**
@@ -201,21 +217,21 @@ protected void doRun() throws Exception {
                     } else {
                         DiscoveryNode masterNode = nodes.getMasterNode();
                         final String actionName = getMasterActionName(masterNode);
-                        transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener,
-                            TransportMasterNodeAction.this::newResponse) {
-                            @Override
-                            public void handleException(final TransportException exp) {
-                                Throwable cause = exp.unwrapCause();
-                                if (cause instanceof ConnectTransportException) {
-                                    // we want to retry here a bit to see if a new master is elected
-                                    logger.debug("connection exception while trying to forward request with action name [{}] to " +
-                                            "master node [{}], scheduling a retry. Error: [{}]",
-                                        actionName, nodes.getMasterNode(), exp.getDetailedMessage());
-                                    retry(cause, masterChangePredicate);
-                                } else {
-                                    listener.onFailure(exp);
+                        transportService.sendRequest(masterNode, actionName, request,
+                            new ActionListenerResponseHandler(listener, TransportMasterNodeAction.this::read) {
+                                @Override
+                                public void handleException(final TransportException exp) {
+                                    Throwable cause = exp.unwrapCause();
+                                    if (cause instanceof ConnectTransportException) {
+                                        // we want to retry here a bit to see if a new master is elected
+                                        logger.debug("connection exception while trying to forward request with action name [{}] to " +
+                                                "master node [{}], scheduling a retry. Error: [{}]",
+                                            actionName, nodes.getMasterNode(), exp.getDetailedMessage());
+                                        retry(cause, masterChangePredicate);
+                                    } else {
+                                        listener.onFailure(exp);
+                                    }
                                 }
-                            }
                         });
                     }
                 }
diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
index 2be4e5bf053cc..317792c610479 100644
--- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java
@@ -27,6 +27,7 @@
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -39,6 +40,7 @@
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -186,8 +188,10 @@ void start() {
                     transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
                             new TransportResponseHandler() {
                                 @Override
-                                public NodeResponse newInstance() {
-                                    return newNodeResponse();
+                                public NodeResponse read(StreamInput in) throws IOException {
+                                    NodeResponse nodeResponse = newNodeResponse();
+                                    nodeResponse.readFrom(in);
+                                    return nodeResponse;
                                 }
 
                                 @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index 695c9162633f6..820ab0300d69a 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -48,6 +48,7 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
@@ -317,12 +318,17 @@ public void onResponse(PrimaryShardReference primaryShardReference) {
                     // phase is executed on local shard and all subsequent operations are executed on relocation target as primary phase.
                     final ShardRouting primary = primaryShardReference.routingEntry();
                     assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
+                    final Writeable.Reader reader = in -> {
+                        Response response = TransportReplicationAction.this.newResponseInstance();
+                        response.readFrom(in);
+                        return response;
+                    };
                     DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId());
                     transportService.sendRequest(relocatingNode, transportPrimaryAction,
                         new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId(), primaryTerm),
                         transportOptions,
                         new TransportChannelResponseHandler(logger, channel, "rerouting indexing to target primary " + primary,
-                            TransportReplicationAction.this::newResponseInstance) {
+                            reader) {
 
                             @Override
                             public void handleResponse(Response response) {
@@ -577,7 +583,7 @@ public void onNewClusterState(ClusterState state) {
                         String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
                         TransportChannelResponseHandler handler =
                             new TransportChannelResponseHandler<>(logger, channel, extraMessage,
-                                () -> TransportResponse.Empty.INSTANCE);
+                                (in) -> TransportResponse.Empty.INSTANCE);
                         transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
                             new ConcreteReplicaRequest<>(request, targetAllocationID, primaryTerm,
                                 globalCheckpoint, maxSeqNoOfUpdatesOrDeletes),
@@ -813,8 +819,10 @@ private void performAction(final DiscoveryNode node, final String action, final
             transportService.sendRequest(node, action, requestToPerform, transportOptions, new TransportResponseHandler() {
 
                 @Override
-                public Response newInstance() {
-                    return newResponseInstance();
+                public Response read(StreamInput in) throws IOException {
+                    Response response = newResponseInstance();
+                    response.readFrom(in);
+                    return response;
                 }
 
                 @Override
@@ -1186,7 +1194,11 @@ protected void sendReplicaRequest(
             final ConcreteReplicaRequest replicaRequest,
             final DiscoveryNode node,
             final ActionListener listener) {
-        final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, ReplicaResponse::new);
+        final ActionListenerResponseHandler handler = new ActionListenerResponseHandler<>(listener, in -> {
+            ReplicaResponse replicaResponse = new ReplicaResponse();
+            replicaResponse.readFrom(in);
+            return replicaResponse;
+        });
         transportService.sendRequest(node, transportReplicaAction, replicaRequest, transportOptions, handler);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java
index e8e710aa81f2c..3a5d8d0e382e9 100644
--- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java
@@ -34,6 +34,7 @@
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.node.NodeClosedException;
@@ -47,6 +48,7 @@
 import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.function.Supplier;
 
 public abstract class TransportInstanceSingleOperationAction, Response extends ActionResponse>
@@ -178,8 +180,10 @@ protected void doStart(ClusterState clusterState) {
             transportService.sendRequest(node, shardActionName, request, transportOptions(), new TransportResponseHandler() {
 
                 @Override
-                public Response newInstance() {
-                    return newResponse();
+                public Response read(StreamInput in) throws IOException {
+                    Response response = newResponse();
+                    response.readFrom(in);
+                    return response;
                 }
 
                 @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
index 436089ab3be73..0a50413e96964 100644
--- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java
@@ -37,6 +37,7 @@
 import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
@@ -182,8 +183,10 @@ public void start() {
                 // just execute it on the local node
                 transportService.sendRequest(clusterService.localNode(), transportShardAction, internalRequest.request(), new TransportResponseHandler() {
                     @Override
-                    public Response newInstance() {
-                        return newResponse();
+                    public Response read(StreamInput in) throws IOException {
+                        Response response = newResponse();
+                        response.readFrom(in);
+                        return response;
                     }
 
                     @Override
@@ -246,8 +249,10 @@ private void perform(@Nullable final Exception currentFailure) {
                 transportService.sendRequest(node, transportShardAction, internalRequest.request(), new TransportResponseHandler() {
 
                     @Override
-                    public Response newInstance() {
-                        return newResponse();
+                    public Response read(StreamInput in) throws IOException {
+                        Response response = newResponse();
+                        response.readFrom(in);
+                        return response;
                     }
 
                     @Override
diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
index 38a0d96600ce8..dad2bb8ad0896 100644
--- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java
@@ -270,8 +270,10 @@ private void start() {
                             transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(),
                                 new TransportResponseHandler() {
                                     @Override
-                                    public NodeTasksResponse newInstance() {
-                                        return new NodeTasksResponse();
+                                    public NodeTasksResponse read(StreamInput in) throws IOException {
+                                        NodeTasksResponse response = new NodeTasksResponse();
+                                        response.readFrom(in);
+                                        return response;
                                     }
 
                                     @Override
diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
index aa0672d80ba1d..0cfc1f5004ce8 100644
--- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
+++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java
@@ -511,8 +511,10 @@ protected void doRun() throws Exception {
                                 new TransportResponseHandler() {
 
                                     @Override
-                                    public ClusterStateResponse newInstance() {
-                                        return new ClusterStateResponse();
+                                    public ClusterStateResponse read(StreamInput in) throws IOException {
+                                        final ClusterStateResponse clusterStateResponse = new ClusterStateResponse();
+                                        clusterStateResponse.readFrom(in);
+                                        return clusterStateResponse;
                                     }
 
                                     @Override
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
index 5acf2effad390..b48ea77e64c75 100644
--- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
+++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java
@@ -225,8 +225,8 @@ public void run() {
             transportService.sendRequest(masterToPing, MASTER_PING_ACTION_NAME, request, options,
                 new TransportResponseHandler() {
                         @Override
-                        public MasterPingResponseResponse newInstance() {
-                            return new MasterPingResponseResponse();
+                        public MasterPingResponseResponse read(StreamInput in) throws IOException {
+                            return new MasterPingResponseResponse(in);
                         }
 
                         @Override
@@ -433,14 +433,8 @@ private static class MasterPingResponseResponse extends TransportResponse {
         private MasterPingResponseResponse() {
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            super.writeTo(out);
+        private MasterPingResponseResponse(StreamInput in) throws IOException {
+            super(in);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java
index 57e5cab020be1..40bde9ee81d15 100644
--- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java
+++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java
@@ -226,8 +226,8 @@ public void run() {
                 .withTimeout(pingRetryTimeout).build();
             transportService.sendRequest(node, PING_ACTION_NAME, newPingRequest(), options, new TransportResponseHandler() {
                         @Override
-                        public PingResponse newInstance() {
-                            return new PingResponse();
+                        public PingResponse read(StreamInput in) throws IOException {
+                            return new PingResponse(in);
                         }
 
                         @Override
@@ -359,14 +359,8 @@ private static class PingResponse extends TransportResponse {
         private PingResponse() {
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            super.writeTo(out);
+        private PingResponse(StreamInput in) throws IOException {
+            super(in);
         }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
index 7bc2e38dde024..5630ceea72945 100644
--- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
+++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
@@ -84,8 +84,10 @@ public void allocateDangled(Collection indices, final Listener li
         AllocateDangledRequest request = new AllocateDangledRequest(clusterService.localNode(), indices.toArray(new IndexMetaData[indices.size()]));
         transportService.sendRequest(masterNode, ACTION_NAME, request, new TransportResponseHandler() {
             @Override
-            public AllocateDangledResponse newInstance() {
-                return new AllocateDangledResponse();
+            public AllocateDangledResponse read(StreamInput in) throws IOException {
+                final AllocateDangledResponse response = new AllocateDangledResponse();
+                response.readFrom(in);
+                return response;
             }
 
             @Override
diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
index fb7885a217e01..aeb88021f26e1 100644
--- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
+++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java
@@ -313,8 +313,10 @@ protected void getInflightOpsCount(final ShardId shardId, ClusterState state, In
             transportService.sendRequest(primaryNode, IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpsRequest(shardId),
                     new TransportResponseHandler<InFlightOpsResponse>() {
                         @Override
-                        public InFlightOpsResponse newInstance() {
-                            return new InFlightOpsResponse();
+                        public InFlightOpsResponse read(StreamInput in) throws IOException {
+                            InFlightOpsResponse response = new InFlightOpsResponse();
+                            response.readFrom(in);
+                            return response;
                         }
 
                         @Override
@@ -383,8 +385,10 @@ void sendSyncRequests(final String syncId, final List<ShardRouting> shards, ClusterState state,
             transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId),
                     new TransportResponseHandler<ShardSyncedFlushResponse>() {
                         @Override
-                        public ShardSyncedFlushResponse newInstance() {
-                            return new ShardSyncedFlushResponse();
+                        public ShardSyncedFlushResponse read(StreamInput in) throws IOException {
+                            ShardSyncedFlushResponse response = new ShardSyncedFlushResponse();
+                            response.readFrom(in);
+                            return response;
                         }
 
                         @Override
@@ -437,8 +441,10 @@ void sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state,
             }
             transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new TransportResponseHandler<PreSyncedFlushResponse>() {
                 @Override
-                public PreSyncedFlushResponse newInstance() {
-                    return new PreSyncedFlushResponse();
+                public PreSyncedFlushResponse read(StreamInput in) throws IOException {
+                    PreSyncedFlushResponse response = new PreSyncedFlushResponse();
+                    response.readFrom(in);
+                    return response;
                 }
 
                 @Override
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
index f60994a4bced4..39709eb3ac2ff 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java
@@ -35,6 +35,7 @@
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
@@ -195,8 +196,10 @@ private void doRecovery(final long recoveryId) {
                     transportService.submitRequest(request.sourceNode(), PeerRecoverySourceService.Actions.START_RECOVERY, request,
                             new FutureTransportResponseHandler<RecoveryResponse>() {
                                 @Override
-                                public RecoveryResponse newInstance() {
-                                    return new RecoveryResponse();
+                                public RecoveryResponse read(StreamInput in) throws IOException {
+                                    RecoveryResponse recoveryResponse = new RecoveryResponse();
+                                    recoveryResponse.readFrom(in);
+                                    return recoveryResponse;
                                 }
                             }).txGet()));
             final RecoveryResponse recoveryResponse = responseHolder.get();
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java
index 530b8b67415d3..8633380f3947a 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTranslogOperationsResponse.java
@@ -63,8 +63,10 @@ public void readFrom(final StreamInput in) throws IOException {
     static TransportResponseHandler<RecoveryTranslogOperationsResponse> HANDLER =
             new FutureTransportResponseHandler<RecoveryTranslogOperationsResponse>() {
                 @Override
-                public RecoveryTranslogOperationsResponse newInstance() {
-                    return new RecoveryTranslogOperationsResponse();
+                public RecoveryTranslogOperationsResponse read(StreamInput in) throws IOException {
+                    RecoveryTranslogOperationsResponse response = new RecoveryTranslogOperationsResponse();
+                    response.readFrom(in);
+                    return response;
                 }
             };
 
diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java
index 6512e25fc0b78..f1a43077a6215 100644
--- a/server/src/main/java/org/elasticsearch/search/SearchService.java
+++ b/server/src/main/java/org/elasticsearch/search/SearchService.java
@@ -1101,6 +1101,10 @@ public static final class CanMatchResponse extends SearchPhaseResult {
         public CanMatchResponse() {
         }
 
+        public CanMatchResponse(StreamInput in) throws IOException {
+            this.canMatch = in.readBoolean();
+        }
+
         public CanMatchResponse(boolean canMatch) {
             this.canMatch = canMatch;
         }
diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java
index 8de89089c4f01..718b895217433 100644
--- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java
@@ -46,6 +46,10 @@ public class DfsSearchResult extends SearchPhaseResult {
     public DfsSearchResult() {
     }
 
+    public DfsSearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public DfsSearchResult(long id, SearchShardTarget shardTarget) {
         this.setSearchShardTarget(shardTarget);
         this.requestId = id;
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
index a5f27733ad28a..12391151861d0 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
@@ -38,6 +38,10 @@ public final class FetchSearchResult extends SearchPhaseResult {
     public FetchSearchResult() {
     }
 
+    public FetchSearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public FetchSearchResult(long id, SearchShardTarget shardTarget) {
         this.requestId = id;
         setSearchShardTarget(shardTarget);
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
index 8d1e6276e65d9..0a5a7cec375db 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/QueryFetchSearchResult.java
@@ -38,6 +38,10 @@ public final class QueryFetchSearchResult extends SearchPhaseResult {
     public QueryFetchSearchResult() {
     }
 
+    public QueryFetchSearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public QueryFetchSearchResult(QuerySearchResult queryResult, FetchSearchResult fetchResult) {
         this.queryResult = queryResult;
         this.fetchResult = fetchResult;
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java
index 55aa4a96d018c..6b0a8b619bff3 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/ScrollQueryFetchSearchResult.java
@@ -36,6 +36,10 @@ public final class ScrollQueryFetchSearchResult extends SearchPhaseResult {
     public ScrollQueryFetchSearchResult() {
     }
 
+    public ScrollQueryFetchSearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public ScrollQueryFetchSearchResult(QueryFetchSearchResult result, SearchShardTarget shardTarget) {
         this.result = result;
         setSearchShardTarget(shardTarget);
diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 2aded57ece04c..43654823914b4 100644
--- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -66,6 +66,10 @@ public final class QuerySearchResult extends SearchPhaseResult {
     public QuerySearchResult() {
     }
 
+    public QuerySearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public QuerySearchResult(long id, SearchShardTarget shardTarget) {
         this.requestId = id;
         setSearchShardTarget(shardTarget);
diff --git a/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
index 6401459489955..632d148ea901b 100644
--- a/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/query/ScrollQuerySearchResult.java
@@ -35,6 +35,10 @@ public final class ScrollQuerySearchResult extends SearchPhaseResult {
     public ScrollQuerySearchResult() {
     }
 
+    public ScrollQuerySearchResult(StreamInput in) throws IOException {
+        readFrom(in);
+    }
+
     public ScrollQuerySearchResult(QuerySearchResult result, SearchShardTarget shardTarget) {
         this.result = result;
         setSearchShardTarget(shardTarget);
diff --git a/server/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java
index c5814cf0fefcc..7ff1ef8391fd6 100644
--- a/server/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/transport/EmptyTransportResponseHandler.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.transport;
 
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.threadpool.ThreadPool;
 
 public class EmptyTransportResponseHandler implements TransportResponseHandler<TransportResponse.Empty> {
@@ -32,7 +33,7 @@ public EmptyTransportResponseHandler(String executor) {
     }
 
     @Override
-    public TransportResponse.Empty newInstance() {
+    public TransportResponse.Empty read(StreamInput in) {
         return TransportResponse.Empty.INSTANCE;
     }
 
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java
index d93bbb57201e2..8e72e6d5768f1 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java
@@ -47,7 +47,7 @@ void doExecute(Action<Response> action, Request request, ActionListener<Response> listener) {
             Transport.Connection connection = remoteClusterService.getConnection(clusterAlias);
             service.sendRequest(connection, action.name(), request, TransportRequestOptions.EMPTY,
-                new ActionListenerResponseHandler<>(listener, action::newResponse));
+                new ActionListenerResponseHandler<>(listener, action.getResponseReader()));
         },
         listener::onFailure));
     }
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java
index c9f3a2aa36540..48f086ad972bf 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java
@@ -218,8 +218,8 @@ private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest,
             new TransportResponseHandler<ClusterSearchShardsResponse>() {
 
                 @Override
-                public ClusterSearchShardsResponse newInstance() {
-                    return new ClusterSearchShardsResponse();
+                public ClusterSearchShardsResponse read(StreamInput in) throws IOException {
+                    return new ClusterSearchShardsResponse(in);
                 }
 
                 @Override
@@ -591,8 +591,10 @@ private class SniffClusterStateResponseHandler implements TransportResponseHandler<ClusterStateResponse> {
             }
 
             @Override
-            public ClusterStateResponse newInstance() {
-                return new ClusterStateResponse();
+            public ClusterStateResponse read(StreamInput in) throws IOException {
+                ClusterStateResponse response = new ClusterStateResponse();
+                response.readFrom(in);
+                return response;
             }
 
             @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
index 27b4aa7293e18..ad41e8c2902a3 100644
--- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
+++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java
@@ -205,7 +205,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
 
     private final MeanMetric readBytesMetric = new MeanMetric();
     private final MeanMetric transmittedBytesMetric = new MeanMetric();
-    private volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
+    private volatile Map<String, RequestHandlerRegistry<? extends TransportRequest>> requestHandlers = Collections.emptyMap();
     private final ResponseHandlers responseHandlers = new ResponseHandlers();
     private final TransportLogger transportLogger;
     private final BytesReference pingMessage;
@@ -284,8 +284,8 @@ private static class HandshakeResponseHandler implements TransportResponseHandle
         }
 
         @Override
-        public VersionHandshakeResponse newInstance() {
-            return new VersionHandshakeResponse();
+        public VersionHandshakeResponse read(StreamInput in) throws IOException {
+            return new VersionHandshakeResponse(in);
         }
 
         @Override
@@ -1273,7 +1273,8 @@ public final void messageReceived(BytesReference reference, TcpChannel channel)
                 if (isHandshake) {
                     handler = pendingHandshakes.remove(requestId);
                 } else {
-                    TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, messageListener);
+                    TransportResponseHandler<? extends TransportResponse> theHandler =
+                        responseHandlers.onResponseReceived(requestId, messageListener);
                     if (theHandler == null && TransportStatus.isError(status)) {
                         handler = pendingHandshakes.remove(requestId);
                     } else {
@@ -1319,8 +1320,9 @@ static void ensureVersionCompatibility(Version version, Version currentVersion,
         }
     }
 
-    private void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream, final TransportResponseHandler handler) {
-        final TransportResponse response;
+    private <T extends TransportResponse> void handleResponse(InetSocketAddress remoteAddress, final StreamInput stream,
+                                final TransportResponseHandler<T> handler) {
+        final T response;
         try {
             response = handler.read(stream);
             response.remoteAddress(new TransportAddress(remoteAddress));
@@ -1469,17 +1471,13 @@ public void onFailure(Exception e) {
     }
 
     private static final class VersionHandshakeResponse extends TransportResponse {
-        private Version version;
+        private final Version version;
 
         private VersionHandshakeResponse(Version version) {
             this.version = version;
         }
 
-        private VersionHandshakeResponse() {
-        }
-
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
+        private VersionHandshakeResponse(StreamInput in) throws IOException {
             super.readFrom(in);
             version = Version.readVersion(in);
         }
@@ -1736,7 +1734,7 @@ public final ResponseHandlers getResponseHandlers() {
     }
 
     @Override
-    public final RequestHandlerRegistry getRequestHandler(String action) {
+    public final RequestHandlerRegistry<? extends TransportRequest> getRequestHandler(String action) {
         return requestHandlers.get(action);
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/transport/Transport.java b/server/src/main/java/org/elasticsearch/transport/Transport.java
index fc1f0c9e5ec0f..e13213dca066a 100644
--- a/server/src/main/java/org/elasticsearch/transport/Transport.java
+++ b/server/src/main/java/org/elasticsearch/transport/Transport.java
@@ -54,7 +54,7 @@ public interface Transport extends LifecycleComponent {
      * Returns the registered request handler registry for the given action or null if it's not registered
      * @param action the action to look up
      */
-    RequestHandlerRegistry getRequestHandler(String action);
+    RequestHandlerRegistry<? extends TransportRequest> getRequestHandler(String action);
 
     void addMessageListener(TransportMessageListener listener);
 
@@ -184,7 +184,7 @@ public String action() {
      * This class is a registry that allows
      */
     final class ResponseHandlers {
-        private final ConcurrentMapLong<ResponseContext> handlers = ConcurrentCollections
+        private final ConcurrentMapLong<ResponseContext<? extends TransportResponse>> handlers = ConcurrentCollections
             .newConcurrentMapLongWithAggressiveConcurrency();
         private final AtomicLong requestIdGenerator = new AtomicLong();
 
@@ -208,7 +208,7 @@ public ResponseContext remove(long requestId) {
          * @return the new request ID
          * @see Connection#sendRequest(long, String, TransportRequest, TransportRequestOptions)
          */
-        public long add(ResponseContext holder) {
+        public long add(ResponseContext<? extends TransportResponse> holder) {
             long requestId = newRequestId();
             ResponseContext existing = handlers.put(requestId, holder);
             assert existing == null : "request ID already in use: " + requestId;
@@ -226,10 +226,10 @@ long newRequestId() {
         /**
          * Removes and returns all {@link ResponseContext} instances that match the predicate
          */
-        public List<ResponseContext> prune(Predicate<ResponseContext> predicate) {
-            final List<ResponseContext> holders = new ArrayList<>();
-            for (Map.Entry<Long, ResponseContext> entry : handlers.entrySet()) {
-                ResponseContext holder = entry.getValue();
+        public List<ResponseContext<? extends TransportResponse>> prune(Predicate<ResponseContext<? extends TransportResponse>> predicate) {
+            final List<ResponseContext<? extends TransportResponse>> holders = new ArrayList<>();
+            for (Map.Entry<Long, ResponseContext<? extends TransportResponse>> entry : handlers.entrySet()) {
+                ResponseContext<? extends TransportResponse> holder = entry.getValue();
                 if (predicate.test(holder)) {
                     ResponseContext remove = handlers.remove(entry.getKey());
                     if (remove != null) {
@@ -245,8 +245,9 @@ public List prune(Predicate predicate) {
          * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not
          * found.
          */
-        public TransportResponseHandler onResponseReceived(final long requestId, TransportMessageListener listener) {
-            ResponseContext context = handlers.remove(requestId);
+        public TransportResponseHandler<? extends TransportResponse> onResponseReceived(final long requestId,
+                                                                                        final TransportMessageListener listener) {
+            ResponseContext<? extends TransportResponse> context = handlers.remove(requestId);
             listener.onResponseReceived(requestId, context);
             if (context == null) {
                 return null;
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java
index a17509e826003..a5b926249f8e2 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportActionProxy.java
@@ -28,7 +28,6 @@
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import java.util.function.Function;
-import java.util.function.Supplier;
 
 /**
  * TransportActionProxy allows an arbitrary action to be executed on a defined target node while the initial request is sent to a second
@@ -43,10 +42,10 @@ private static class ProxyRequestHandler<T extends TransportRequest> implements TransportRequestHandler<ProxyRequest<T>> {
 
         private final TransportService service;
         private final String action;
-        private final Function<TransportRequest, Supplier<TransportResponse>> responseFunction;
+        private final Function<TransportRequest, Writeable.Reader<? extends TransportResponse>> responseFunction;
 
-        ProxyRequestHandler(TransportService service, String action, Function<TransportRequest,
-                Supplier<TransportResponse>> responseFunction) {
+        ProxyRequestHandler(TransportService service, String action, Function<TransportRequest,
+                Writeable.Reader<? extends TransportResponse>> responseFunction) {
             this.service = service;
             this.action = action;
             this.responseFunction = responseFunction;
@@ -63,17 +62,17 @@ public void messageReceived(T request, TransportChannel channel, Task task) thro
 
     private static class ProxyResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
 
-        private final Supplier<T> responseFactory;
+        private final Writeable.Reader<T> reader;
         private final TransportChannel channel;
 
-        ProxyResponseHandler(TransportChannel channel, Supplier<T> responseFactory) {
-            this.responseFactory = responseFactory;
+        ProxyResponseHandler(TransportChannel channel, Writeable.Reader<T> reader) {
+            this.reader = reader;
+            this.reader = reader;
             this.channel = channel;
-
         }
+
         @Override
-        public T newInstance() {
-            return responseFactory.get();
+        public T read(StreamInput in) throws IOException {
+            return reader.read(in);
         }
 
         @Override
@@ -101,26 +100,25 @@ public String executor() {
     }
 
     static class ProxyRequest<T extends TransportRequest> extends TransportRequest {
-        T wrapped;
-        Writeable.Reader reader;
-        DiscoveryNode targetNode;
-
-        ProxyRequest(Writeable.Reader reader) {
-            this.reader = reader;
-        }
+        final T wrapped;
+        final DiscoveryNode targetNode;
 
         ProxyRequest(T wrapped, DiscoveryNode targetNode) {
             this.wrapped = wrapped;
             this.targetNode = targetNode;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        ProxyRequest(StreamInput in, Writeable.Reader reader) throws IOException {
+            super(in);
             targetNode = new DiscoveryNode(in);
             wrapped = reader.read(in);
         }
 
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -133,21 +131,23 @@ public void writeTo(StreamOutput out) throws IOException {
      * Registers a proxy request handler that allows to forward requests for the given action to another node. To be used when the
      * response type changes based on the upcoming request (quite rare)
      */
-    public static void registerProxyAction(TransportService service, String action,
-                                           Function<TransportRequest, Supplier<TransportResponse>> responseFunction) {
-        RequestHandlerRegistry requestHandler = service.getRequestHandler(action);
-        service.registerRequestHandler(getProxyAction(action), () -> new ProxyRequest<>(requestHandler::newRequest), ThreadPool.Names.SAME,
-            true, false, new ProxyRequestHandler<>(service, action, responseFunction));
+    public static void registerProxyActionWithDynamicResponseType(TransportService service, String action,
+                                                                  Function<TransportRequest, Writeable.Reader<? extends TransportResponse>> responseFunction) {
+        RequestHandlerRegistry<? extends TransportRequest> requestHandler = service.getRequestHandler(action);
+        service.registerRequestHandler(getProxyAction(action), ThreadPool.Names.SAME, true, false,
+            in -> new ProxyRequest<>(in, requestHandler::newRequest), new ProxyRequestHandler<>(service, action, responseFunction));
     }
 
     /**
      * Registers a proxy request handler that allows to forward requests for the given action to another node. To be used when the
      * response type is always the same (most of the cases).
      */
-    public static void registerProxyAction(TransportService service, String action, Supplier<TransportResponse> responseSupplier) {
-        RequestHandlerRegistry requestHandler = service.getRequestHandler(action);
-        service.registerRequestHandler(getProxyAction(action), () -> new ProxyRequest<>(requestHandler::newRequest), ThreadPool.Names.SAME,
-                true, false, new ProxyRequestHandler<>(service, action, request -> responseSupplier));
+    public static void registerProxyAction(TransportService service, String action,
+                                           Writeable.Reader<? extends TransportResponse> reader) {
+        RequestHandlerRegistry<? extends TransportRequest> requestHandler = service.getRequestHandler(action);
+        service.registerRequestHandler(getProxyAction(action), ThreadPool.Names.SAME, true, false,
+            in -> new ProxyRequest<>(in, requestHandler::newRequest), new ProxyRequestHandler<>(service, action, request -> reader));
     }
 
     private static final String PROXY_ACTION_PREFIX = "internal:transport/proxy/";
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
index 4ba2769edb4a2..6b45feec94859 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java
@@ -21,10 +21,11 @@
 
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
-import java.util.function.Supplier;
 
 /**
  * Base class for delegating transport response to a transport channel
@@ -34,19 +35,19 @@ public class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
     private final Logger logger;
     private final TransportChannel channel;
     private final String extraInfoOnError;
-    private final Supplier<T> responseSupplier;
+    private final Writeable.Reader<T> reader;
 
     public TransportChannelResponseHandler(Logger logger, TransportChannel channel, String extraInfoOnError,
-                                           Supplier<T> responseSupplier) {
+                                           Writeable.Reader<T> reader) {
         this.logger = logger;
         this.channel = channel;
         this.extraInfoOnError = extraInfoOnError;
-        this.responseSupplier = responseSupplier;
+        this.reader = reader;
     }
 
     @Override
-    public T newInstance() {
-        return responseSupplier.get();
+    public T read(StreamInput in) throws IOException {
+        return reader.read(in);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportMessage.java b/server/src/main/java/org/elasticsearch/transport/TransportMessage.java
index ecaca73b2db57..05deab8eafbf0 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportMessage.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportMessage.java
@@ -39,6 +39,19 @@ public TransportAddress remoteAddress() {
         return remoteAddress;
     }
 
+    /**
+     * Constructs a new empty transport message
+     */
+    public TransportMessage() {
+    }
+
+    /**
+     * Constructs a new transport message with the data from the {@link StreamInput}. This is
+     * currently a no-op
+     */
+    public TransportMessage(StreamInput in) throws IOException {
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
 
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportResponse.java b/server/src/main/java/org/elasticsearch/transport/TransportResponse.java
index 25ae72a479f7d..5ad9c9fee544e 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportResponse.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportResponse.java
@@ -19,8 +19,27 @@
 
 package org.elasticsearch.transport;
 
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
 public abstract class TransportResponse extends TransportMessage {
 
+    /**
+     * Constructs a new empty transport response
+     */
+    public TransportResponse() {
+    }
+
+    /**
+     * Constructs a new transport response with the data from the {@link StreamInput}. This is
+     * currently a no-op. However, this exists to allow extenders to call super(in)
+     * so that reading can mirror writing where we often call super.writeTo(out).
+     */
+    public TransportResponse(StreamInput in) throws IOException {
+        super(in);
+    }
+
     public static class Empty extends TransportResponse {
         public static final Empty INSTANCE = new Empty();
     }
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java b/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java
index fbe477ad04b1d..29720216cf400 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportResponseHandler.java
@@ -19,34 +19,10 @@
 
 package org.elasticsearch.transport;
 
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.Writeable;
 
-import java.io.IOException;
-
 public interface TransportResponseHandler<T extends TransportResponse> extends Writeable.Reader<T> {
 
-    /**
-     * @deprecated Implement {@link #read(StreamInput)} instead.
-     */
-    @Deprecated
-    default T newInstance() {
-        throw new UnsupportedOperationException();
-    }
-
-    /**
-     * deserializes a new instance of the return type from the stream.
-     * called by the infra when de-serializing the response.
-     *
-     * @return the deserialized response.
-     */
-    @Override
-    default T read(StreamInput in) throws IOException {
-        T instance = newInstance();
-        instance.readFrom(in);
-        return instance;
-    }
-
     void handleResponse(T response);
 
     void handleException(TransportException exp);
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java
index db14fd015fd82..c2ae982b3dce1 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportService.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java
@@ -434,8 +434,8 @@ public HandshakeResponse handshake(
             PlainTransportFuture<HandshakeResponse> futureHandler = new PlainTransportFuture<>(
                 new FutureTransportResponseHandler<HandshakeResponse>() {
                 @Override
-                public HandshakeResponse newInstance() {
-                    return new HandshakeResponse();
+                public HandshakeResponse read(StreamInput in) throws IOException {
+                    return new HandshakeResponse(in);
                 }
             });
             sendRequest(connection, HANDSHAKE_ACTION_NAME, HandshakeRequest.INSTANCE,
@@ -468,12 +468,9 @@ private HandshakeRequest() {
     }
 
     public static class HandshakeResponse extends TransportResponse {
-        private DiscoveryNode discoveryNode;
-        private ClusterName clusterName;
-        private Version version;
-
-        HandshakeResponse() {
-        }
+        private final DiscoveryNode discoveryNode;
+        private final ClusterName clusterName;
+        private final Version version;
 
         public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) {
             this.discoveryNode = discoveryNode;
@@ -481,9 +478,8 @@ public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, V
             this.clusterName = clusterName;
         }
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        public HandshakeResponse(StreamInput in) throws IOException {
+            super(in);
             discoveryNode = in.readOptionalWriteable(DiscoveryNode::new);
             clusterName = new ClusterName(in);
             version = Version.readVersion(in);
@@ -930,7 +926,7 @@ public void onRequestReceived(long requestId, String action) {
         }
     }
 
-    public RequestHandlerRegistry getRequestHandler(String action) {
+    public RequestHandlerRegistry<? extends TransportRequest> getRequestHandler(String action) {
         return transport.getRequestHandler(action);
     }
 
@@ -977,8 +973,8 @@ private void checkForTimeout(long requestId) {
     @Override
     public void onConnectionClosed(Transport.Connection connection) {
         try {
-            List<Transport.ResponseContext> pruned = responseHandlers.prune(h -> h.connection().getCacheKey().equals(connection
-                .getCacheKey()));
+            List<Transport.ResponseContext<? extends TransportResponse>> pruned =
+                responseHandlers.prune(h -> h.connection().getCacheKey().equals(connection.getCacheKey()));
             // callback that an exception happened, but on a different thread since we don't
             // want handlers to worry about stack overflows
             getExecutorService().execute(() -> {
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
index f685be02141ad..fbfe0e497017f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java
@@ -83,8 +83,7 @@ public void testSerialization() throws Exception {
             clusterSearchShardsResponse.writeTo(out);
             try(StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) {
                 in.setVersion(version);
-                ClusterSearchShardsResponse deserialized = new ClusterSearchShardsResponse();
-                deserialized.readFrom(in);
+                ClusterSearchShardsResponse deserialized = new ClusterSearchShardsResponse(in);
                 assertArrayEquals(clusterSearchShardsResponse.getNodes(), deserialized.getNodes());
                 assertEquals(clusterSearchShardsResponse.getGroups().length, deserialized.getGroups().length);
                 for (int i = 0; i < clusterSearchShardsResponse.getGroups().length; i++) {
diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
index c763709a04e40..e529af97c800d 100644
--- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
@@ -254,7 +254,7 @@ public void testBuildClusters() {
             remoteIndices.put(cluster, randomOriginalIndices());
             if (onlySuccessful || randomBoolean()) {
                 //whatever response counts as successful as long as it's not the empty placeholder
-                searchShardsResponses.put(cluster, new ClusterSearchShardsResponse());
+                searchShardsResponses.put(cluster, new ClusterSearchShardsResponse(null, null, null));
                 successful++;
             } else {
                 searchShardsResponses.put(cluster, ClusterSearchShardsResponse.EMPTY);
diff --git a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
index d64cdf89ef7ea..41d691c95bd90 100644
--- a/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/client/transport/TransportClientNodesServiceTests.java
@@ -253,8 +253,8 @@ public void onFailure(Exception e) {
                     iteration.transportService.sendRequest(node, "action", new TestRequest(),
                             TransportRequestOptions.EMPTY, new TransportResponseHandler<TestResponse>() {
                         @Override
-                        public TestResponse newInstance() {
-                            return new TestResponse();
+                        public TestResponse read(StreamInput in) {
+                            return new TestResponse(in);
                         }
 
                         @Override
@@ -435,5 +435,7 @@ public static class TestRequest extends TransportRequest {
 
     private static class TestResponse extends TransportResponse {
 
+        private TestResponse() {}
+        private TestResponse(StreamInput in) {}
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
index 46364c19ee0ec..6c27680d74162 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
@@ -172,9 +172,7 @@ public void testRemoteProfileIsUsedForLocalCluster() throws Exception {
                         new FutureTransportResponseHandler<ClusterSearchShardsResponse>() {
                             @Override
                             public ClusterSearchShardsResponse read(StreamInput in) throws IOException {
-                                ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse();
-                                inst.readFrom(in);
-                                return inst;
+                                return new ClusterSearchShardsResponse(in);
                             }
                         });
                     TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK)
@@ -215,9 +213,7 @@ public void testRemoteProfileIsUsedForRemoteCluster() throws Exception {
                         new FutureTransportResponseHandler<ClusterSearchShardsResponse>() {
                             @Override
                             public ClusterSearchShardsResponse read(StreamInput in) throws IOException {
-                                ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse();
-                                inst.readFrom(in);
-                                return inst;
+                                return new ClusterSearchShardsResponse(in);
                             }
                         });
                     TransportRequestOptions options = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.BULK)
@@ -233,9 +229,7 @@ public ClusterSearchShardsResponse read(StreamInput in) throws IOException {
                         new FutureTransportResponseHandler<ClusterSearchShardsResponse>() {
                             @Override
                             public ClusterSearchShardsResponse read(StreamInput in) throws IOException {
-                                ClusterSearchShardsResponse inst = new ClusterSearchShardsResponse();
-                                inst.readFrom(in);
-                                return inst;
+                                return new ClusterSearchShardsResponse(in);
                             }
                         });
                     TransportRequestOptions ops = TransportRequestOptions.builder().withType(TransportRequestOptions.Type.REG)
diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java
index 428d416ac0242..7d52c12e47364 100644
--- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java
@@ -86,8 +86,7 @@ public void testSendMessage() throws InterruptedException {
         serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertEquals(request.sourceNode, "TS_A");
-                SimpleTestResponse response = new SimpleTestResponse();
-                response.targetNode = "TS_A";
+                SimpleTestResponse response = new SimpleTestResponse("TS_A");
                 channel.sendResponse(response);
             });
         TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new);
@@ -96,8 +95,7 @@ public void testSendMessage() throws InterruptedException {
         serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertEquals(request.sourceNode, "TS_A");
-                SimpleTestResponse response = new SimpleTestResponse();
-                response.targetNode = "TS_B";
+                SimpleTestResponse response = new SimpleTestResponse("TS_B");
                 channel.sendResponse(response);
             });
         TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new);
@@ -105,8 +103,7 @@ public void testSendMessage() throws InterruptedException {
         serviceC.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertEquals(request.sourceNode, "TS_A");
-                SimpleTestResponse response = new SimpleTestResponse();
-                response.targetNode = "TS_C";
+                SimpleTestResponse response = new SimpleTestResponse("TS_C");
                 channel.sendResponse(response);
             });
         TransportActionProxy.registerProxyAction(serviceC, "internal:test", SimpleTestResponse::new);
@@ -115,8 +112,8 @@ public void testSendMessage() throws InterruptedException {
         serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("internal:test"), TransportActionProxy.wrapRequest(nodeC,
             new SimpleTestRequest("TS_A")), new TransportResponseHandler<SimpleTestResponse>() {
                 @Override
-                public SimpleTestResponse newInstance() {
-                    return new SimpleTestResponse();
+                public SimpleTestResponse read(StreamInput in) throws IOException {
+                    return new SimpleTestResponse(in);
                 }
 
                 @Override
@@ -131,7 +128,7 @@ public void handleResponse(SimpleTestResponse response) {
                 @Override
                 public void handleException(TransportException exp) {
                     try {
-                    throw new AssertionError(exp);
+                        throw new AssertionError(exp);
                     } finally {
                         latch.countDown();
                     }
@@ -149,8 +146,7 @@ public void testException() throws InterruptedException {
         serviceA.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertEquals(request.sourceNode, "TS_A");
-                SimpleTestResponse response = new SimpleTestResponse();
-                response.targetNode = "TS_A";
+                SimpleTestResponse response = new SimpleTestResponse("TS_A");
                 channel.sendResponse(response);
             });
         TransportActionProxy.registerProxyAction(serviceA, "internal:test", SimpleTestResponse::new);
@@ -159,8 +155,7 @@ public void testException() throws InterruptedException {
         serviceB.registerRequestHandler("internal:test", SimpleTestRequest::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertEquals(request.sourceNode, "TS_A");
-                SimpleTestResponse response = new SimpleTestResponse();
-                response.targetNode = "TS_B";
+                SimpleTestResponse response = new SimpleTestResponse("TS_B");
                 channel.sendResponse(response);
             });
         TransportActionProxy.registerProxyAction(serviceB, "internal:test", SimpleTestResponse::new);
@@ -175,8 +170,8 @@ public void testException() throws InterruptedException {
         serviceA.sendRequest(nodeB, TransportActionProxy.getProxyAction("internal:test"), TransportActionProxy.wrapRequest(nodeC,
             new SimpleTestRequest("TS_A")), new TransportResponseHandler<SimpleTestResponse>() {
                 @Override
-                public SimpleTestResponse newInstance() {
-                    return new SimpleTestResponse();
+                public SimpleTestResponse read(StreamInput in) throws IOException {
+                    return new SimpleTestResponse(in);
                 }
 
                 @Override
@@ -228,11 +223,20 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     public static class SimpleTestResponse extends TransportResponse {
-        String targetNode;
+        final String targetNode;
+
+        SimpleTestResponse(String targetNode) {
+            this.targetNode = targetNode;
+        }
+
+        SimpleTestResponse(StreamInput in) throws IOException {
+            super(in);
+            this.targetNode = in.readString();
+        }
+
         @Override
         public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            targetNode = in.readString();
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }
 
         @Override
@@ -263,7 +267,7 @@ public void testIsProxyAction() {
     }
 
     public void testIsProxyRequest() {
-        assertTrue(TransportActionProxy.isProxyRequest(new TransportActionProxy.ProxyRequest<>((in) -> null)));
+        assertTrue(TransportActionProxy.isProxyRequest(new TransportActionProxy.ProxyRequest<>(TransportRequest.Empty.INSTANCE, null)));
         assertFalse(TransportActionProxy.isProxyRequest(TransportRequest.Empty.INSTANCE));
     }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
index 132a07d5b7f48..1b8405a2d591a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
@@ -47,6 +47,7 @@
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.TransportStats;
 
@@ -163,8 +164,10 @@ public void clear() {
     /**
      * simulate a response for the given requestId
      */
-    public void handleResponse(final long requestId, final TransportResponse response) {
-        responseHandlers.onResponseReceived(requestId, listener).handleResponse(response);
+    public <Response extends TransportResponse> void handleResponse(final long requestId, final Response response) {
+        TransportResponseHandler<Response> handler =
+            (TransportResponseHandler<Response>) responseHandlers.onResponseReceived(requestId, listener);
+        handler.handleResponse(response);
     }
 
     /**
diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
index 85a654c4cac36..f4cf6e09642de 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java
@@ -233,8 +233,8 @@ public void testHelloWorld() {
         TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHello",
             new StringMessageRequest("moshe"), new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -264,8 +264,8 @@ public void handleException(TransportException exp) {
         res = serviceB.submitRequest(nodeA, "internal:sayHello", new StringMessageRequest("moshe"),
             TransportRequestOptions.builder().withCompress(true).build(), new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -312,8 +312,8 @@ public void testThreadContext() throws ExecutionException, InterruptedException
         final String executor = randomFrom(ThreadPool.THREAD_POOL_TYPES.keySet().toArray(new String[0]));
         TransportResponseHandler<StringMessageResponse> responseHandler = new TransportResponseHandler<StringMessageResponse>() {
             @Override
-            public StringMessageResponse newInstance() {
-                return new StringMessageResponse();
+            public StringMessageResponse read(StreamInput in) throws IOException {
+                return new StringMessageResponse(in);
             }
 
             @Override
@@ -367,8 +367,8 @@ public void testLocalNodeConnection() throws InterruptedException {
         serviceA.sendRequest(nodeA, "internal:localNode", new StringMessageRequest("test"),
             new TransportResponseHandler<StringMessageResponse>() {
             @Override
-            public StringMessageResponse newInstance() {
-                return new StringMessageResponse();
+            public StringMessageResponse read(StreamInput in) throws IOException {
+                return new StringMessageResponse(in);
             }
 
             @Override
@@ -516,7 +516,7 @@ public void testVoidMessageCompressed() {
             TransportRequest.Empty.INSTANCE, TransportRequestOptions.builder().withCompress(true).build(),
             new TransportResponseHandler<TransportResponse.Empty>() {
                 @Override
-                public TransportResponse.Empty newInstance() {
+                public TransportResponse.Empty read(StreamInput in) {
                     return TransportResponse.Empty.INSTANCE;
                 }
 
@@ -564,8 +564,8 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withCompress(true).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -606,8 +606,8 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
         TransportFuture<StringMessageResponse> res = serviceB.submitRequest(nodeA, "internal:sayHelloException",
             new StringMessageRequest("moshe"), new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -658,7 +658,7 @@ public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierExcepti
         serviceA.registerRequestHandler("internal:test", TestRequest::new,
             randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, (request, channel, task) -> {
                 try {
-                    channel.sendResponse(new TestResponse());
+                    channel.sendResponse(new TestResponse((String) null));
                 } catch (Exception e) {
                     logger.info("caught exception while responding", e);
                     responseErrors.add(e);
@@ -666,7 +666,7 @@ public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierExcepti
             });
         final TransportRequestHandler<TestRequest> ignoringRequestHandler = (request, channel, task) -> {
             try {
-                channel.sendResponse(new TestResponse());
+                channel.sendResponse(new TestResponse((String) null));
             } catch (Exception e) {
                 // we don't really care what's going on B, we're testing through A
                 logger.trace("caught exception while responding from node B", e);
@@ -822,8 +822,8 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -886,8 +886,8 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
             new StringMessageRequest("forever"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler<StringMessageResponse>() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -924,8 +924,8 @@ public void handleException(TransportException exp) {
                 new StringMessageRequest(counter + "ms"), TransportRequestOptions.builder().withTimeout(3000).build(),
                new TransportResponseHandler<StringMessageResponse>() {
                     @Override
-                    public StringMessageResponse newInstance() {
-                        return new StringMessageResponse();
+                    public StringMessageResponse read(StreamInput in) throws IOException {
+                        return new StringMessageResponse(in);
                     }
 
                     @Override
@@ -975,8 +975,8 @@ public void messageReceived(StringMessageRequest request, TransportChannel chann
         TransportResponseHandler<StringMessageResponse> noopResponseHandler = new TransportResponseHandler<StringMessageResponse>() {
 
             @Override
-            public StringMessageResponse newInstance() {
-                return new StringMessageResponse();
+            public StringMessageResponse read(StreamInput in) throws IOException {
+                return new StringMessageResponse(in);
             }
 
             @Override
@@ -1174,19 +1174,19 @@ public void writeTo(StreamOutput out) throws IOException {
 
     static class StringMessageResponse extends TransportResponse {
 
-        private String message;
+        private final String message;
 
         StringMessageResponse(String message) {
             this.message = message;
         }
 
-        StringMessageResponse() {
+        StringMessageResponse(StreamInput in) throws IOException {
+            this.message = in.readString();
         }
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            message = in.readString();
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }
 
         @Override
@@ -1238,12 +1238,19 @@ public void writeTo(StreamOutput out) throws IOException {
 
     static class Version0Response extends TransportResponse {
 
-        int value1;
+        final int value1;
+
+        Version0Response(int value1) {
+            this.value1 = value1;
+        }
+
+        Version0Response(StreamInput in) throws IOException {
+            this.value1 = in.readInt();
+        }
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            value1 = in.readInt();
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }
 
         @Override
@@ -1255,16 +1262,27 @@ public void writeTo(StreamOutput out) throws IOException {
 
     static class Version1Response extends Version0Response {
 
-        int value2;
+        final int value2;
 
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
+        Version1Response(int value1, int value2) {
+            super(value1);
+            this.value2 = value2;
+        }
+
+        Version1Response(StreamInput in) throws IOException {
+            super(in);
             if (in.getVersion().onOrAfter(version1)) {
                 value2 = in.readInt();
+            } else {
+                value2 = 0;
             }
         }
 
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+        }
+
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
@@ -1281,9 +1299,7 @@ public void testVersionFrom0to1() throws Exception {
                 public void messageReceived(Version1Request request, TransportChannel channel, Task task) throws Exception {
                     assertThat(request.value1, equalTo(1));
                     assertThat(request.value2, equalTo(0)); // not set, coming from service A
-                    Version1Response response = new Version1Response();
-                    response.value1 = 1;
-                    response.value2 = 2;
+                    Version1Response response = new Version1Response(1, 2);
                     channel.sendResponse(response);
                     assertEquals(version0, channel.getVersion());
                 }
@@ -1294,8 +1310,8 @@ public void messageReceived(Version1Request request, TransportChannel channel, T
         Version0Response version0Response = serviceA.submitRequest(nodeB, "internal:version", version0Request,
             new TransportResponseHandler<Version0Response>() {
                 @Override
-                public Version0Response newInstance() {
-                    return new Version0Response();
+                public Version0Response read(StreamInput in) throws IOException {
+                    return new Version0Response(in);
                 }
 
                 @Override
@@ -1324,8 +1340,7 @@ public void testVersionFrom1to0() throws Exception {
                 @Override
                 public void messageReceived(Version0Request request, TransportChannel channel, Task task) throws Exception {
                     assertThat(request.value1, equalTo(1));
-                    Version0Response response = new Version0Response();
-                    response.value1 = 1;
+                    Version0Response response = new Version0Response(1);
                     channel.sendResponse(response);
                     assertEquals(version0, channel.getVersion());
                 }
@@ -1337,8 +1352,8 @@ public void messageReceived(Version0Request request, TransportChannel channel, T
         Version1Response version1Response = serviceB.submitRequest(nodeA, "internal:version", version1Request,
             new TransportResponseHandler<Version1Response>() {
                 @Override
-                public Version1Response newInstance() {
-                    return new Version1Response();
+                public Version1Response read(StreamInput in) throws IOException {
+                    return new Version1Response(in);
                 }
 
                 @Override
@@ -1368,9 +1383,7 @@ public void testVersionFrom1to1() throws Exception {
             (request, channel, task) -> {
                 assertThat(request.value1, equalTo(1));
                 assertThat(request.value2, equalTo(2));
-                Version1Response response = new Version1Response();
-                response.value1 = 1;
-                response.value2 = 2;
+                Version1Response response = new Version1Response(1, 2);
                 channel.sendResponse(response);
                 assertEquals(version1, channel.getVersion());
             });
@@ -1381,8 +1394,8 @@ public void testVersionFrom1to1() throws Exception {
         Version1Response version1Response = serviceB.submitRequest(nodeB, "internal:version", version1Request,
             new TransportResponseHandler<Version1Response>() {
                 @Override
-                public Version1Response newInstance() {
-                    return new Version1Response();
+                public Version1Response read(StreamInput in) throws IOException {
+                    return new Version1Response(in);
                 }
 
                 @Override
@@ -1411,8 +1424,7 @@ public void testVersionFrom0to0() throws Exception {
         serviceA.registerRequestHandler("internal:version", Version0Request::new, ThreadPool.Names.SAME,
             (request, channel, task) -> {
                 assertThat(request.value1, equalTo(1));
-                Version0Response response = new Version0Response();
-                response.value1 = 1;
+                Version0Response response = new Version0Response(1);
                 channel.sendResponse(response);
                 assertEquals(version0, channel.getVersion());
             });
@@ -1422,8 +1434,8 @@ public void testVersionFrom0to0() throws Exception {
         Version0Response version0Response = serviceA.submitRequest(nodeA, "internal:version", version0Request,
             new TransportResponseHandler<Version0Response>() {
                 @Override
-                public Version0Response newInstance() {
-                    return new Version0Response();
+                public Version0Response read(StreamInput in) throws IOException {
+                    return new Version0Response(in);
                 }
 
                 @Override
@@ -1458,8 +1470,8 @@ public void testMockFailToSendNoConnectRule() throws Exception {
         TransportFuture res = serviceB.submitRequest(nodeA, "internal:sayHello",
             new StringMessageRequest("moshe"), new TransportResponseHandler() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -1516,8 +1528,8 @@ public void testMockUnresponsiveRule() throws IOException {
             new StringMessageRequest("moshe"), TransportRequestOptions.builder().withTimeout(100).build(),
             new TransportResponseHandler() {
                 @Override
-                public StringMessageResponse newInstance() {
-                    return new StringMessageResponse();
+                public StringMessageResponse read(StreamInput in) throws IOException {
+                    return new StringMessageResponse(in);
                 }
 
                 @Override
@@ -1561,13 +1573,13 @@ public void testHostOnMessages() throws InterruptedException {
         final AtomicReference addressB = new AtomicReference<>();
         serviceB.registerRequestHandler("internal:action1", TestRequest::new, ThreadPool.Names.SAME, (request, channel, task) -> {
             addressA.set(request.remoteAddress());
-            channel.sendResponse(new TestResponse());
+            channel.sendResponse(new TestResponse((String) null));
             latch.countDown();
         });
         serviceA.sendRequest(nodeB, "internal:action1", new TestRequest(), new TransportResponseHandler() {
             @Override
-            public TestResponse newInstance() {
-                return new TestResponse();
+            public TestResponse read(StreamInput in) throws IOException {
+                return new TestResponse(in);
             }
 
             @Override
@@ -1614,8 +1626,8 @@ public void testBlockingIncomingRequests() throws Exception {
                 serviceA.sendRequest(connection, "internal:action", new TestRequest(), TransportRequestOptions.EMPTY,
                     new TransportResponseHandler() {
                         @Override
-                        public TestResponse newInstance() {
-                            return new TestResponse();
+                        public TestResponse read(StreamInput in) throws IOException {
+                            return new TestResponse(in);
                         }
 
                         @Override
@@ -1680,9 +1692,10 @@ public String toString() {
 
     private static class TestResponse extends TransportResponse {
 
-        String info;
+        final String info;
 
-        TestResponse() {
+        TestResponse(StreamInput in) throws IOException {
+            this.info = in.readOptionalString();
         }
 
         TestResponse(String info) {
@@ -1691,8 +1704,7 @@ private static class TestResponse extends TransportResponse {
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            info = in.readOptionalString();
+            throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
         }
 
         @Override
@@ -1777,8 +1789,8 @@ public void messageReceived(TestRequest request, TransportChannel channel, Task
                         TransportRequestOptions.builder().withCompress(randomBoolean()).build(),
                         new TransportResponseHandler() {
                             @Override
-                            public TestResponse newInstance() {
-                                return new TestResponse();
+                            public TestResponse read(StreamInput in) throws IOException {
+                                return new TestResponse(in);
                             }
 
                             @Override
@@ -1834,8 +1846,8 @@ class TestResponseHandler implements TransportResponseHandler {
             }
 
             @Override
-            public TestResponse newInstance() {
-                return new TestResponse();
+            public TestResponse read(StreamInput in) throws IOException {
+                return new TestResponse(in);
             }
 
             @Override
@@ -2100,7 +2112,7 @@ public void testResponseHeadersArePreserved() throws InterruptedException {
 
         TransportResponseHandler transportResponseHandler = new TransportResponseHandler() {
             @Override
-            public TransportResponse newInstance() {
+            public TransportResponse read(StreamInput in) {
                 return TransportResponse.Empty.INSTANCE;
             }
 
@@ -2154,7 +2166,7 @@ public void testHandlerIsInvokedOnConnectionClose() throws IOException, Interrup
         CountDownLatch latch = new CountDownLatch(1);
         TransportResponseHandler transportResponseHandler = new TransportResponseHandler() {
             @Override
-            public TransportResponse newInstance() {
+            public TransportResponse read(StreamInput in) {
                 return TransportResponse.Empty.INSTANCE;
             }
 
@@ -2231,7 +2243,7 @@ protected void doRun() throws Exception {
         CountDownLatch responseLatch = new CountDownLatch(1);
         TransportResponseHandler transportResponseHandler = new TransportResponseHandler() {
             @Override
-            public TransportResponse newInstance() {
+            public TransportResponse read(StreamInput in) {
                 return TransportResponse.Empty.INSTANCE;
             }
 
@@ -2299,7 +2311,7 @@ protected void doRun() throws Exception {
         CountDownLatch responseLatch = new CountDownLatch(1);
         TransportResponseHandler transportResponseHandler = new TransportResponseHandler() {
             @Override
-            public TransportResponse newInstance() {
+            public TransportResponse read(StreamInput in) {
                 return TransportResponse.Empty.INSTANCE;
             }
 
@@ -2413,7 +2425,7 @@ protected void doRun() throws Exception {
         AtomicReference receivedException = new AtomicReference<>(null);
         TransportResponseHandler transportResponseHandler = new TransportResponseHandler() {
             @Override
-            public TransportResponse newInstance() {
+            public TransportResponse read(StreamInput in) {
                 return TransportResponse.Empty.INSTANCE;
             }
 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
index a7351ccfe14d1..1b85049da235b 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
@@ -335,7 +336,7 @@ public void testContextRestoreResponseHandler() throws Exception {
                     threadContext.wrapRestorable(storedContext), new TransportResponseHandler() {
 
                 @Override
-                public Empty newInstance() {
+                public Empty read(StreamInput in) {
                     return Empty.INSTANCE;
                 }
 
@@ -374,7 +375,7 @@ public void testContextRestoreResponseHandlerRestoreOriginalContext() throws Exc
                         new TransportResponseHandler() {
 
                             @Override
-                            public Empty newInstance() {
+                            public Empty read(StreamInput in) {
                                 return Empty.INSTANCE;
                             }
 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
index abd5768bebec9..6ff18cc77a1e2 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java
@@ -9,6 +9,7 @@
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -176,8 +177,12 @@ public void testThatConnectionToClientTypeConnectionIsRejected() throws IOExcept
                         TransportRequestOptions.EMPTY,
                         new TransportResponseHandler() {
                     @Override
-                    public TransportResponse newInstance() {
-                        fail("never get that far");
+                    public TransportResponse read(StreamInput in) {
+                        try {
+                            fail("never get that far");
+                        } finally {
+                            latch.countDown();
+                        }
                         return null;
                     }
 

From fdfdbe486d9e31530a483dcec3c77bccf7f398b4 Mon Sep 17 00:00:00 2001
From: Jason Tedor 
Date: Fri, 26 Oct 2018 11:23:35 -0400
Subject: [PATCH 9/9] Introduce cross-cluster replication API docs (#34726)

This commit is our first introduction to cross-cluster replication
docs. In this commit, we introduce the cross-cluster replication API
docs. We also add skeleton docs for additional content that will be added
in a series of follow-up commits.
---
 docs/build.gradle                             |  18 +-
 .../delete-auto-follow-pattern.asciidoc       |  68 +++++
 .../get-auto-follow-pattern.asciidoc          |  93 +++++++
 .../get-auto-follow-stats.asciidoc            |  46 ++++
 .../put-auto-follow-pattern.asciidoc          | 118 ++++++++
 docs/reference/ccr/apis/ccr-apis.asciidoc     |  38 +++
 .../ccr/apis/follow-request-body.asciidoc     |  44 +++
 .../ccr/apis/follow/get-follow-stats.asciidoc | 254 ++++++++++++++++++
 .../apis/follow/post-pause-follow.asciidoc    |  68 +++++
 .../apis/follow/post-resume-follow.asciidoc   |  94 +++++++
 .../ccr/apis/follow/post-unfollow.asciidoc    |  75 ++++++
 .../ccr/apis/follow/put-follow.asciidoc       |  94 +++++++
 docs/reference/ccr/getting-started.asciidoc   |   6 +
 docs/reference/ccr/index.asciidoc             |  15 ++
 docs/reference/ccr/overview.asciidoc          |   6 +
 docs/reference/index.asciidoc                 |   2 +
 docs/reference/rest-api/index.asciidoc        |   2 +
 17 files changed, 1040 insertions(+), 1 deletion(-)
 create mode 100644 docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
 create mode 100644 docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
 create mode 100644 docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc
 create mode 100644 docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
 create mode 100644 docs/reference/ccr/apis/ccr-apis.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow-request-body.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow/post-unfollow.asciidoc
 create mode 100644 docs/reference/ccr/apis/follow/put-follow.asciidoc
 create mode 100644 docs/reference/ccr/getting-started.asciidoc
 create mode 100644 docs/reference/ccr/index.asciidoc
 create mode 100644 docs/reference/ccr/overview.asciidoc

diff --git a/docs/build.gradle b/docs/build.gradle
index ce560e1ca4208..99f82d95b585f 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -139,7 +139,6 @@ buildRestTests.setups['host'] = '''
   - do:
       nodes.info:
         metric: [ http, transport ]
-  - is_true: nodes.$master.http.publish_address
   - set: {nodes.$master.http.publish_address: host}
   - set: {nodes.$master.transport.publish_address: transport_host}
 '''
@@ -1083,4 +1082,21 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale
            ]}
 '''
 
+buildRestTests.setups['remote_cluster'] = buildRestTests.setups['host'] + '''
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            cluster.remote.remote_cluster.seeds: $transport_host
+'''
 
+buildRestTests.setups['remote_cluster_and_leader_index'] = buildRestTests.setups['remote_cluster'] + '''
+  - do:
+      indices.create:
+        index: leader_index
+        body:
+          settings:
+            index.number_of_replicas: 0
+            index.number_of_shards: 1
+            index.soft_deletes.enabled: true
+'''
diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
new file mode 100644
index 0000000000000..301c7f7da4998
--- /dev/null
+++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc
@@ -0,0 +1,68 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-delete-auto-follow-pattern]]
+=== Delete Auto-Follow Pattern API
+++++
+Delete Auto-Follow Pattern
+++++
+
+Delete auto-follow patterns.
+
+==== Description
+
+This API deletes a configured auto-follow pattern collection.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /_ccr/auto_follow/my_auto_follow_pattern
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index_patterns" :
+  [
+    "leader_index"
+  ],
+  "follow_index_pattern" : "{{leader_index}}-follower"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+// TESTSETUP
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/
+--------------------------------------------------
+// CONSOLE
+// TEST[s//my_auto_follow_pattern/]
+
+==== Path Parameters
+`auto_follow_pattern_name` (required)::
+  (string) specifies the auto-follow pattern collection to delete
+
+==== Example
+
+This example deletes an auto-follow pattern collection named
+`my_auto_follow_pattern`:
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/my_auto_follow_pattern
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
new file mode 100644
index 0000000000000..1ff9c9943c9df
--- /dev/null
+++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc
@@ -0,0 +1,93 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-get-auto-follow-pattern]]
+=== Get Auto-Follow Pattern API
+++++
+Get Auto-Follow Pattern
+++++
+
+Get auto-follow patterns.
+
+==== Description
+
+This API gets configured auto-follow patterns. This API will return the
+specified auto-follow pattern collection.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /_ccr/auto_follow/my_auto_follow_pattern
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index_patterns" :
+  [
+    "leader_index*"
+  ],
+  "follow_index_pattern" : "{{leader_index}}-follower"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+// TESTSETUP
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/my_auto_follow_pattern
+--------------------------------------------------
+// CONSOLE
+// TEST
+// TEARDOWN
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/auto_follow/
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/auto_follow/
+--------------------------------------------------
+// CONSOLE
+// TEST[s//my_auto_follow_pattern/]
+
+==== Path Parameters
+`auto_follow_pattern_name`::
+  (string) specifies the auto-follow pattern collection that you want to
+  retrieve; if you do not specify a name, the API returns information for all
+  collections
+
+==== Example
+
+This example retrieves information about an auto-follow pattern collection
+named `my_auto_follow_pattern`:
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/auto_follow/my_auto_follow_pattern
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "my_auto_follow_pattern" :
+  {
+    "remote_cluster" : "remote_cluster",
+    "leader_index_patterns" :
+    [
+      "leader_index*"
+    ],
+    "follow_index_pattern" : "{{leader_index}}-follower"
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc
new file mode 100644
index 0000000000000..85c6775af1cab
--- /dev/null
+++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-stats.asciidoc
@@ -0,0 +1,46 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-get-auto-follow-stats]]
+=== Get Auto-Follow Stats API
+++++
+Get Auto-Follow Stats
+++++
+
+Get auto-follow stats.
+
+==== Description
+
+This API gets stats about auto-follow patterns.
+
+==== Request
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/auto_follow/stats
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+==== Example
+
+This example retrieves stats about auto-follow patterns:
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/auto_follow/stats
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "number_of_successful_follow_indices" : 16,
+  "number_of_failed_follow_indices" : 0,
+  "number_of_failed_remote_cluster_state_requests" : 0,
+  "recent_auto_follow_errors" : [ ]
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"number_of_successful_follow_indices" : 16/"number_of_successful_follow_indices" : $body.number_of_successful_follow_indices/]
diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
new file mode 100644
index 0000000000000..e18b69579d303
--- /dev/null
+++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc
@@ -0,0 +1,118 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-put-auto-follow-pattern]]
+=== Create Auto-Follow Pattern API
+++++
+Create Auto-Follow Pattern
+++++
+
+Creates an auto-follow pattern.
+
+==== Description
+
+This API creates a new named collection of auto-follow patterns against the
+remote cluster specified in the request body. Newly created indices on the
+remote cluster matching any of the specified patterns will be automatically
+configured as follower indices.
+
+==== Request
+
+[source,js]
+--------------------------------------------------
+PUT /_ccr/auto_follow/
+{
+  "remote_cluster" : "",
+  "leader_index_patterns" :
+  [
+    ""
+  ],
+  "follow_index_pattern" : ""
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+// TEST[s//auto_follow_pattern_name/]
+// TEST[s//remote_cluster/]
+// TEST[s//leader_index*/]
+// TEST[s//{{leader_index}}-follower/]
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/auto_follow_pattern_name
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
+
+==== Path Parameters
+`auto_follow_pattern_name` (required)::
+  (string) name of the collection of auto-follow patterns
+
+==== Request Body
+`remote_cluster`::
+  (required string) the <> containing the
+  leader indices to match against
+
+`leader_index_patterns`::
+  (array) an array of simple index patterns to match against indices in the
+  remote cluster specified by the `remote_cluster` field
+
+`follow_index_pattern`::
+  (string) the name of follower index; the template `{{leader_index}}` can be
+  used to derive the name of the follower index from the name of the leader
+  index
+
+include::../follow-request-body.asciidoc[]
+
+==== Example
+
+This example creates an auto-follow pattern named `my_auto_follow_pattern`:
+
+[source,js]
+--------------------------------------------------
+PUT /_ccr/auto_follow/my_auto_follow_pattern
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index_patterns" :
+  [
+    "leader_index*"
+  ],
+  "follow_index_pattern" : "{{leader_index}}-follower",
+  "max_read_request_operation_count" : 1024,
+  "max_outstanding_read_requests" : 16,
+  "max_read_request_size" : "1024k",
+  "max_write_request_operation_count" : 32768,
+  "max_write_request_size" : "16k",
+  "max_outstanding_write_requests" : 8,
+  "max_write_buffer_count" : 512,
+  "max_write_buffer_size" : "512k",
+  "max_retry_delay" : "10s",
+  "read_poll_timeout" : "30s"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/my_auto_follow_pattern
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc
new file mode 100644
index 0000000000000..d4a45bab6ed61
--- /dev/null
+++ b/docs/reference/ccr/apis/ccr-apis.asciidoc
@@ -0,0 +1,38 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-apis]]
+== Cross-cluster replication APIs
+
+You can use the following APIs to perform {ccr} operations.
+
+[float]
+[[ccr-api-follow]]
+=== Follow
+
+* <>
+* <>
+* <>
+* <>
+* <>
+
+[float]
+[[ccr-api-auto-follow]]
+=== Auto-follow
+
+* <>
+* <>
+* <>
+* <>
+
+// follow
+include::follow/put-follow.asciidoc[]
+include::follow/post-pause-follow.asciidoc[]
+include::follow/post-resume-follow.asciidoc[]
+include::follow/post-unfollow.asciidoc[]
+include::follow/get-follow-stats.asciidoc[]
+
+// auto-follow
+include::auto-follow/put-auto-follow-pattern.asciidoc[]
+include::auto-follow/delete-auto-follow-pattern.asciidoc[]
+include::auto-follow/get-auto-follow-pattern.asciidoc[]
+include::auto-follow/get-auto-follow-stats.asciidoc[]
diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc
new file mode 100644
index 0000000000000..7215cc01302a1
--- /dev/null
+++ b/docs/reference/ccr/apis/follow-request-body.asciidoc
@@ -0,0 +1,44 @@
+`max_read_request_operation_count`::
+  (integer) the maximum number of operations to pull per read from the remote
+  cluster
+
+`max_outstanding_read_requests`::
+  (long) the maximum number of outstanding reads requests from the remote
+  cluster
+
+`max_read_request_size`::
+  (<>) the maximum size in bytes of per read of a batch
+  of operations pulled from the remote cluster
+
+`max_write_request_operation_count`::
+  (integer) the maximum number of operations per bulk write request executed on
+  the follower
+
+`max_write_request_size`::
+  (<>) the maximum total bytes of operations per bulk write request
+  executed on the follower
+
+`max_outstanding_write_requests`::
+  (integer) the maximum number of outstanding write requests on the follower
+
+`max_write_buffer_count`::
+  (integer) the maximum number of operations that can be queued for writing;
+  when this limit is reached, reads from the remote cluster will be deferred
+  until the number of queued operations goes below the limit
+
+`max_write_buffer_size`::
+  (<>) the maximum total bytes of operations that can be queued for
+  writing; when this limit is reached, reads from the remote cluster will be
+  deferred until the total bytes of queued operations goes below the limit
+
+`max_retry_delay`::
+  (<>) the maximum time to wait before retrying an
+  operation that failed exceptionally; an exponential backoff strategy is
+  employed when retrying
+
+`read_poll_timeout`::
+  (<>) the maximum time to wait for new operations on the
+  remote cluster when the follower index is synchronized with the leader index;
+  when the timeout has elapsed, the poll for operations will return to the
+  follower so that it can update some statistics, and then the follower will
+  immediately attempt to read from the leader again
\ No newline at end of file
diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
new file mode 100644
index 0000000000000..efbaeecb712d5
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
@@ -0,0 +1,254 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-get-follow-stats]]
+=== Get Follower Stats API
+++++
+Get Follower Stats
+++++
+
+Get follower stats.
+
+==== Description
+
+This API gets follower stats. This API will return shard-level stats about the
+following tasks associated with each shard for the specified indices.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEARDOWN
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/stats
+--------------------------------------------------
+// CONSOLE
+
+[source,js]
+--------------------------------------------------
+GET //_ccr/stats
+--------------------------------------------------
+// CONSOLE
+// TEST[s//follower_index/]
+
+==== Path Parameters
+`index` ::
+  (string) a comma-delimited list of index patterns
+
+==== Results
+
+This API returns the following information:
+
+`indices`::
+  (array) an array of follower index statistics
+
+The `indices` array consists of objects containing two fields:
+
+`indices[].index`::
+  (string) the name of the follower index
+
+`indices[].shards`::
+  (array) an array of shard-level following task statistics
+
+The `shards` array consists of objects containing the following fields:
+
+`indices[].shards[].remote_cluster`::
+  (string) the <> containing the leader
+  index
+
+`indices[].shards[].leader_index`::
+  (string) the name of the index in the leader cluster being followed
+
+`indices[].shards[].follower_index`::
+  (string) the name of the follower index
+
+`indices[].shards[].shard_id`::
+  (integer) the numerical shard ID, with values from 0 to one less than the
+  number of shards of the index
+
+`indices[].shards[].leader_global_checkpoint`::
+  (long) the current global checkpoint on the leader known to the follower task
+
+`indices[].shards[].leader_max_seq_no`::
+  (long) the current maximum sequence number on the leader known to the follower
+  task
+
+`indices[].shards[].follower_global_checkpoint`::
+  (long) the current global checkpoint on the follower; the difference between the
+  `leader_global_checkpoint` and the `follower_global_checkpoint` is an
+  indication of how much the follower is lagging the leader
+
+`indices[].shards[].follower_max_seq_no`::
+  (long) the current maximum sequence number on the follower
+
+`indices[].shards[].last_requested_seq_no`::
+  (long) the starting sequence number of the last batch of operations requested
+  from the leader
+
+`indices[].shards[].outstanding_read_requests`::
+  (integer) the number of active read requests from the follower
+
+`indices[].shards[].outstanding_write_requests`::
+  (integer) the number of active bulk write requests on the follower
+
+`indices[].shards[].write_buffer_operation_count`::
+  (integer) the number of write operations queued on the follower
+
+`indices[].shards[].follower_mapping_version`::
+  (long) the mapping version the follower is synced up to
+
+`indices[].shards[].total_read_time_millis`::
+  (long) the total time reads were outstanding, measured from the time a read
+  was sent to the leader to the time a reply was returned to the follower
+
+`indices[].shards[].total_read_remote_exec_time_millis`::
+  (long) the total time reads spent executing on the remote cluster
+
+`indices[].shards[].successful_read_requests`::
+  (long) the number of successful fetches
+
+`indices[].shards[].failed_read_requests`::
+  (long) the number of failed reads
+
+`indices[].shards[].operations_read`::
+  (long) the total number of operations read from the leader
+
+`indices[].shards[].bytes_read`::
+  (long) the total of transferred bytes read from the leader (note this is only
+  an estimate, and does not account for compression if enabled)
+
+`indices[].shards[].total_write_time_millis`::
+  (long) the total time spent writing on the follower
+
+`indices[].shards[].write_buffer_size_in_bytes`::
+  (long) the total number of bytes of operations currently queued for writing
+
+`indices[].shards[].successful_write_requests`::
+  (long) the number of bulk write requests executed on the follower
+
+`indices[].shards[].failed_write_requests`::
+  (long) the number of failed bulk write requests executed on the follower
+
+`indices[].shards[].operations_written`::
+  (long) the number of operations written on the follower
+
+`indices[].shards[].read_exceptions`::
+  (array) an array of objects representing failed reads
+
+The `read_exceptions` array consists of objects containing the following
+fields:
+
+`indices[].shards[].read_exceptions[].from_seq_no`::
+  (long) the starting sequence number of the batch requested from the leader
+
+`indices[].shards[].read_exceptions[].retries`::
+  (integer) the number of times the batch has been retried
+
+`indices[].shards[].read_exceptions[].exception`::
+  (object) represents the exception that caused the read to fail
+
+Continuing with the fields from `shards`:
+
+`indices[].shards[].time_since_last_read_millis`::
+  (long) the number of milliseconds since a read request was sent to the leader;
+  note that when the follower is caught up to the leader, this number will
+  increase up to the configured `read_poll_timeout` at which point another read
+  request will be sent to the leader
+
+`indices[].fatal_exception`::
+  (object) an object representing a fatal exception that cancelled the following
+  task; in this situation, the following task must be resumed manually with the
+  <<ccr-post-resume-follow,resume follower API>>
+
+==== Example
+
+This example retrieves follower stats:
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/stats
+--------------------------------------------------
+// CONSOLE
+
+The API returns the following results:
+[source,js]
+--------------------------------------------------
+{
+  "indices" : [
+    {
+      "index" : "follower_index",
+      "shards" : [
+        {
+          "remote_cluster" : "remote_cluster",
+          "leader_index" : "leader_index",
+          "follower_index" : "follower_index",
+          "shard_id" : 0,
+          "leader_global_checkpoint" : 1024,
+          "leader_max_seq_no" : 1536,
+          "follower_global_checkpoint" : 768,
+          "follower_max_seq_no" : 896,
+          "last_requested_seq_no" : 897,
+          "outstanding_read_requests" : 8,
+          "outstanding_write_requests" : 2,
+          "write_buffer_operation_count" : 64,
+          "follower_mapping_version" : 4,
+          "total_read_time_millis" : 32768,
+          "total_read_remote_exec_time_millis" : 16384,
+          "successful_read_requests" : 32,
+          "failed_read_requests" : 0,
+          "operations_read" : 896,
+          "bytes_read" : 32768,
+          "total_write_time_millis" : 16384,
+          "write_buffer_size_in_bytes" : 1536,
+          "successful_write_requests" : 16,
+          "failed_write_requests" : 0,
+          "operations_written" : 832,
+          "read_exceptions" : [ ],
+          "time_since_last_read_millis" : 8
+        }
+      ]
+    }
+  ]
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"leader_global_checkpoint" : 1024/"leader_global_checkpoint" : $body.indices.0.shards.0.leader_global_checkpoint/]
+// TESTRESPONSE[s/"leader_max_seq_no" : 1536/"leader_max_seq_no" : $body.indices.0.shards.0.leader_max_seq_no/]
+// TESTRESPONSE[s/"follower_global_checkpoint" : 768/"follower_global_checkpoint" : $body.indices.0.shards.0.follower_global_checkpoint/]
+// TESTRESPONSE[s/"follower_max_seq_no" : 896/"follower_max_seq_no" : $body.indices.0.shards.0.follower_max_seq_no/]
+// TESTRESPONSE[s/"last_requested_seq_no" : 897/"last_requested_seq_no" : $body.indices.0.shards.0.last_requested_seq_no/]
+// TESTRESPONSE[s/"outstanding_read_requests" : 8/"outstanding_read_requests" : $body.indices.0.shards.0.outstanding_read_requests/]
+// TESTRESPONSE[s/"outstanding_write_requests" : 2/"outstanding_write_requests" : $body.indices.0.shards.0.outstanding_write_requests/]
+// TESTRESPONSE[s/"write_buffer_operation_count" : 64/"write_buffer_operation_count" : $body.indices.0.shards.0.write_buffer_operation_count/]
+// TESTRESPONSE[s/"follower_mapping_version" : 4/"follower_mapping_version" : $body.indices.0.shards.0.follower_mapping_version/]
+// TESTRESPONSE[s/"total_read_time_millis" : 32768/"total_read_time_millis" : $body.indices.0.shards.0.total_read_time_millis/]
+// TESTRESPONSE[s/"total_read_remote_exec_time_millis" : 16384/"total_read_remote_exec_time_millis" : $body.indices.0.shards.0.total_read_remote_exec_time_millis/]
+// TESTRESPONSE[s/"successful_read_requests" : 32/"successful_read_requests" : $body.indices.0.shards.0.successful_read_requests/]
+// TESTRESPONSE[s/"failed_read_requests" : 0/"failed_read_requests" : $body.indices.0.shards.0.failed_read_requests/]
+// TESTRESPONSE[s/"operations_read" : 896/"operations_read" : $body.indices.0.shards.0.operations_read/]
+// TESTRESPONSE[s/"bytes_read" : 32768/"bytes_read" : $body.indices.0.shards.0.bytes_read/]
+// TESTRESPONSE[s/"total_write_time_millis" : 16384/"total_write_time_millis" : $body.indices.0.shards.0.total_write_time_millis/]
+// TESTRESPONSE[s/"write_buffer_size_in_bytes" : 1536/"write_buffer_size_in_bytes" : $body.indices.0.shards.0.write_buffer_size_in_bytes/]
+// TESTRESPONSE[s/"successful_write_requests" : 16/"successful_write_requests" : $body.indices.0.shards.0.successful_write_requests/]
+// TESTRESPONSE[s/"failed_write_requests" : 0/"failed_write_requests" : $body.indices.0.shards.0.failed_write_requests/]
+// TESTRESPONSE[s/"operations_written" : 832/"operations_written" : $body.indices.0.shards.0.operations_written/]
+// TESTRESPONSE[s/"time_since_last_read_millis" : 8/"time_since_last_read_millis" : $body.indices.0.shards.0.time_since_last_read_millis/]
diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
new file mode 100644
index 0000000000000..7fa4dbdd45591
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc
@@ -0,0 +1,68 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-post-pause-follow]]
+=== Pause Follower API
+++++
+Pause Follower
+++++
+
+Pauses a follower index.
+
+==== Description
+
+This API pauses a follower index. When this API returns, the follower index will
+not fetch any additional operations from the leader index. You can resume
+following with the <<ccr-post-resume-follow,resume follower API>>. Pausing and
+resuming a follower index can be used to change the configuration of the
+following task.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /<follower_index>/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEST[s/<follower_index>/follower_index/]
+
+==== Path Parameters
+
+`follower_index` (required)::
+  (string) the name of the follower index
+
+==== Example
+
+This example pauses a follower index named `follower_index`:
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
new file mode 100644
index 0000000000000..eb19050961be7
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc
@@ -0,0 +1,94 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-post-resume-follow]]
+=== Resume Follower API
+++++
+Resume Follower
+++++
+
+Resumes a follower index.
+
+==== Description
+
+This API resumes a follower index that has been paused either explicitly with
+the <<ccr-post-pause-follow,pause follower API>> or implicitly due to a failure
+during following that cannot be retried. When this API returns, the follower
+index will resume fetching operations from the leader index.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index"
+}
+
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEARDOWN
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /<follower_index>/_ccr/resume_follow
+{
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/<follower_index>/follower_index/]
+// TEST[s/<remote_cluster>/remote_cluster/]
+// TEST[s/<leader_index>/leader_index/]
+
+==== Path Parameters
+
+`follower_index` (required)::
+  (string) the name of the follower index
+
+==== Request Body
+include::../follow-request-body.asciidoc[]
+
+==== Example
+
+This example resumes a follower index named `follower_index`:
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/resume_follow
+{
+  "max_read_request_operation_count" : 1024,
+  "max_outstanding_read_requests" : 16,
+  "max_read_request_size" : "1024k",
+  "max_write_request_operation_count" : 32768,
+  "max_write_request_size" : "16k",
+  "max_outstanding_write_requests" : 8,
+  "max_write_buffer_count" : 512,
+  "max_write_buffer_size" : "512k",
+  "max_retry_delay" : "10s",
+  "read_poll_timeout" : "30s"
+}
+--------------------------------------------------
+// CONSOLE
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
new file mode 100644
index 0000000000000..d84f170417998
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc
@@ -0,0 +1,75 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-post-unfollow]]
+=== Unfollow API
+++++
+Unfollow
+++++
+
+Converts a follower index to a regular index.
+
+==== Description
+
+This API stops the following task associated with a follower index and removes
+index metadata and settings associated with {ccr}. This enables the index to be
+treated as a regular index. The follower index must be paused and closed before
+invoking the unfollow API.
+
+NOTE: Currently {ccr} does not support converting an existing regular index to a
+follower index. Converting a follower index to a regular index is an
+irreversible operation.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index"
+}
+
+POST /follower_index/_ccr/pause_follow
+
+POST /follower_index/_close
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /<follower_index>/_ccr/unfollow
+--------------------------------------------------
+// CONSOLE
+// TEST[s/<follower_index>/follower_index/]
+
+==== Path Parameters
+
+`follower_index` (required)::
+  (string) the name of the follower index
+
+==== Example
+
+This example converts `follower_index` from a follower index to a regular index:
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/unfollow
+--------------------------------------------------
+// CONSOLE
+// TEST
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc
new file mode 100644
index 0000000000000..db0005fe3c983
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc
@@ -0,0 +1,94 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-put-follow]]
+=== Create Follower API
+++++
+Create Follower
+++++
+
+Creates a follower index.
+
+==== Description
+
+This API creates a new follower index that is configured to follow the
+referenced leader index. When this API returns, the follower index exists, and
+{ccr} starts replicating operations from the leader index to the follower index.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEARDOWN
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /<follower_index>/_ccr/follow
+{
+  "remote_cluster" : "<remote_cluster>",
+  "leader_index" : "<leader_index>"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster_and_leader_index]
+// TEST[s/<follower_index>/follower_index/]
+// TEST[s/<remote_cluster>/remote_cluster/]
+// TEST[s/<leader_index>/leader_index/]
+
+==== Path Parameters
+
+`follower_index` (required)::
+  (string) the name of the follower index
+
+==== Request Body
+`remote_cluster` (required)::
+  (string) the <<modules-remote-clusters,remote cluster>> containing the leader
+  index
+
+`leader_index` (required)::
+  (string) the name of the index in the leader cluster to follow
+
+include::../follow-request-body.asciidoc[]
+
+==== Example
+
+This example creates a follower index named `follower_index`:
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index",
+  "max_read_request_operation_count" : 1024,
+  "max_outstanding_read_requests" : 16,
+  "max_read_request_size" : "1024k",
+  "max_write_request_operation_count" : 32768,
+  "max_write_request_size" : "16k",
+  "max_outstanding_write_requests" : 8,
+  "max_write_buffer_count" : 512,
+  "max_write_buffer_size" : "512k",
+  "max_retry_delay" : "10s",
+  "read_poll_timeout" : "30s"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:remote_cluster_and_leader_index]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "follow_index_created" : true,
+  "follow_index_shards_acked" : true,
+  "index_following_started" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc
new file mode 100644
index 0000000000000..daa6f298e5ff7
--- /dev/null
+++ b/docs/reference/ccr/getting-started.asciidoc
@@ -0,0 +1,6 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-getting-started]]
+== Getting Started
+
+This is the getting started section of the {ccr} docs.
\ No newline at end of file
diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc
new file mode 100644
index 0000000000000..1d5e9445a7b1d
--- /dev/null
+++ b/docs/reference/ccr/index.asciidoc
@@ -0,0 +1,15 @@
+[role="xpack"]
+[testenv="platinum"]
+[[xpack-ccr]]
+= Cross-cluster replication
+
+[partintro]
+--
+
+* <<ccr-overview,Overview>>
+* <<ccr-getting-started,Getting Started>>
+
+--
+
+include::overview.asciidoc[]
+include::getting-started.asciidoc[]
diff --git a/docs/reference/ccr/overview.asciidoc b/docs/reference/ccr/overview.asciidoc
new file mode 100644
index 0000000000000..648a981bc5bdb
--- /dev/null
+++ b/docs/reference/ccr/overview.asciidoc
@@ -0,0 +1,6 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-overview]]
+== Overview
+
+This is the overview section of the {ccr} docs.
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index 216983bc6f01d..dd841fccda761 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -55,6 +55,8 @@ include::index-modules.asciidoc[]
 
 include::ingest.asciidoc[]
 
+include::ccr/index.asciidoc[]
+
 include::sql/index.asciidoc[]
 
 include::monitoring/index.asciidoc[]
diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc
index c6243ab25981b..5aef27e127500 100644
--- a/docs/reference/rest-api/index.asciidoc
+++ b/docs/reference/rest-api/index.asciidoc
@@ -8,6 +8,7 @@
 directly to configure and access {xpack} features.
 
 * <>
+* <<ccr-apis,{ccr-cap} APIs>>
 * <>
 * <>
 * <>
@@ -19,6 +20,7 @@ directly to configure and access {xpack} features.
 
 
 include::info.asciidoc[]
+include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[]
 include::{es-repo-dir}/graph/explore.asciidoc[]
 include::{es-repo-dir}/licensing/index.asciidoc[]
 include::{es-repo-dir}/migration/migration.asciidoc[]