From 25959ed8cfc9af9ecd4b47d67ac8d50df9e5b901 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 22 May 2018 13:53:34 +0200 Subject: [PATCH 01/22] [Security] Include an empty json object in an json array when FLS filters out all fields (#30709) Prior to this change an json array element with no fields would be omitted from json array. Nested inner hits source filtering relies on the fact that the json array element numbering remains untouched and this causes AOOB exceptions in the ES side during the fetch phase without this change. Closes #30624 --- .../authz/accesscontrol/FieldSubsetReader.java | 4 +--- .../accesscontrol/FieldSubsetReaderTests.java | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 5779924bb27fb..8559ab0703b43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -193,9 +193,7 @@ private static List filter(Iterable iterable, CharacterRunAutomaton i continue; } Map filteredValue = filter((Map)value, includeAutomaton, state); - if (filteredValue.isEmpty() == false) { - filtered.add(filteredValue); - } + filtered.add(filteredValue); } else if (value instanceof Iterable) { List filteredValue = filter((Iterable) value, includeAutomaton, initialState); if (filteredValue.isEmpty() == false) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index 4c74e7f5d9059..e71b0e5e8bdc1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -716,6 +716,22 @@ public void testSourceFiltering() { expected.put("foo", subArray); assertEquals(expected, filtered); + + // json array objects that have no matching fields should be left empty instead of being removed: + // (otherwise nested inner hit source filtering fails with AOOB) + map = new HashMap<>(); + map.put("foo", "value"); + List> values = new ArrayList<>(); + values.add(Collections.singletonMap("foo", "1")); + values.add(Collections.singletonMap("baz", "2")); + map.put("bar", values); + + include = new CharacterRunAutomaton(Automatons.patterns("bar.baz")); + filtered = FieldSubsetReader.filter(map, include, 0); + + expected = new HashMap<>(); + expected.put("bar", Arrays.asList(new HashMap<>(), Collections.singletonMap("baz", "2"))); + assertEquals(expected, filtered); } /** From 54740cc551ee36f4bca9526cead71268aba84e46 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 22 May 2018 14:57:02 +0200 Subject: [PATCH 02/22] Increase the maximum number of filters that may be in the cache. (#30655) We added this limit because we occasionally saw cases where most of the memory usage of the cache was spent on the keys (ie. queries) rather than the values, which caused the cache to vastly underestimate its memory usage. 
In recent releases, we disabled caching on heavy `terms` queries, which were the main source of the problem, so putting more entries in the cache should be safer. --- .../java/org/elasticsearch/indices/IndicesQueryCache.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 60741c87f2165..2695c1728491b 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -52,8 +52,10 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache, public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope); + // mostly a way to prevent queries from being the main source of memory usage + // of the cache public static final Setting INDICES_CACHE_QUERY_COUNT_SETTING = - Setting.intSetting("indices.queries.cache.count", 1000, 1, Property.NodeScope); + Setting.intSetting("indices.queries.cache.count", 10_000, 1, Property.NodeScope); // enables caching on all segments instead of only the larger ones, for testing only public static final Setting INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING = Setting.boolSetting("indices.queries.cache.all_segments", false, Property.NodeScope); From 0a3b9e2138c52bfe3fd9ff7e8c02ac8ff8a63945 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Tue, 22 May 2018 07:35:16 -0600 Subject: [PATCH 03/22] Test: wait for netty threads in a JUnit ClassRule (#30763) This commit changes the wait for a few netty threads to wait for these threads to complete after the cluster has stopped. Previously, we were waiting for these threads before the cluster was actually stopped; the cluster is stopped in an AfterClass method of ESIntegTestCase, while the wait was performed in the AfterClass of a class that extended ESIntegTestCase, which is always executed before the AfterClass of ESIntegTestCase. Now, the wait is contained in an ExternalResource ClassRule that implements the waiting for the threads to terminate in the after method. This rule is executed after the AfterClass method in ESIntegTestCase. The same fix has also been applied in SecuritySingleNodeTestCase. 
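For readers unfamiliar with the JUnit 4 ordering this fix relies on, here is a minimal, self-contained sketch (not part of the patch; the class and method names are invented): a subclass `@AfterClass` method runs before the superclass `@AfterClass` method, and a `@ClassRule` `ExternalResource` wraps both, so its `after()` method runs last and is a safe place to wait for lingering threads.

```java
import org.junit.AfterClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.ExternalResource;

// Hypothetical sketch of JUnit 4 class-level ordering:
// 1) subclass @AfterClass, 2) superclass @AfterClass, 3) @ClassRule after()
public class RuleOrderingSketch extends BaseCase {

    @ClassRule
    public static final ExternalResource WAIT_FOR_THREADS = new ExternalResource() {
        @Override
        protected void after() {
            // runs after BaseCase.stopCluster(), so waiting for lingering
            // network threads here happens once the cluster is already stopped
            System.out.println("3) rule after(): wait for netty threads");
        }
    };

    @AfterClass
    public static void subclassCleanup() {
        System.out.println("1) subclass @AfterClass");
    }

    @Test
    public void dummy() {
    }
}

class BaseCase {
    @AfterClass
    public static void stopCluster() {
        System.out.println("2) superclass @AfterClass: stop cluster");
    }
}
```

With this ordering, the wait in `after()` only begins once the superclass `@AfterClass` (the one that stops the cluster in `ESIntegTestCase`) has already run.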
Closes #30563 --- .../test/SecurityIntegTestCase.java | 48 +++++++++++------- .../test/SecuritySingleNodeTestCase.java | 49 ++++++++++++------- 2 files changed, 60 insertions(+), 37 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index 00b46b332cb7c..815f26942767a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -47,6 +47,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -163,24 +164,6 @@ public static void initDefaultSettings() { public static void destroyDefaultSettings() { SECURITY_DEFAULT_SETTINGS = null; customSecuritySettingsSource = null; - // Wait for the network threads to finish otherwise there is the possibility that one of - // the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -204,6 +187,35 @@ protected void before() throws Throwable { } }; + /** + * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. 
After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish otherwise these lingering threads can intermittently + * trigger the thread leak detector + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 1ee654c0baffc..cda627806e7b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -26,6 +26,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import org.junit.rules.ExternalResource; @@ -97,25 +98,6 @@ private static void tearDownRestClient() { IOUtils.closeWhileHandlingException(restClient); restClient = null; } - - // Wait for the network threads to finish otherwise there is the possibility that one of - // the threads lingers and trips the thread leak detector - try { - GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (IllegalStateException e) { - if (e.getMessage().equals("thread was not started") == false) { - throw e; - } - // ignore since the thread was never started - } - - try { - ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } } @Rule @@ -130,6 +112,35 @@ protected void before() { } }; + /** + * A JUnit class level rule that runs after the AfterClass method in {@link ESIntegTestCase}, + * which stops the cluster. 
After the cluster is stopped, there are a few netty threads that + * can linger, so we wait for them to finish otherwise these lingering threads can intermittently + * trigger the thread leak detector + */ + @ClassRule + public static final ExternalResource STOP_NETTY_RESOURCE = new ExternalResource() { + @Override + protected void after() { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } catch (IllegalStateException e) { + if (e.getMessage().equals("thread was not started") == false) { + throw e; + } + // ignore since the thread was never started + } + + try { + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + }; + @Before //before methods from the superclass are run before this, which means that the current cluster is ready to go public void assertXPackIsInstalled() { From 74474e99d6c1b879578c004d602313849b5b7e70 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 22 May 2018 16:03:33 +0200 Subject: [PATCH 04/22] [Docs] Fix broken cross link in documentation --- docs/reference/migration/migrate_7_0/plugins.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index 365a2c5a39f1e..829a93573c905 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -10,7 +10,7 @@ You need to use settings which are starting with `azure.client.` prefix instead. * Global timeout setting `cloud.azure.storage.timeout` has been removed. You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example. -See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings]. +See {plugins}/repository-azure-repository-settings.html#repository-azure-repository-settings[Azure Repository settings]. ==== Google Cloud Storage Repository plugin From 5f172b6795c2d7917144c535542ad521698016c5 Mon Sep 17 00:00:00 2001 From: Itamar Syn-Hershko Date: Tue, 22 May 2018 17:26:31 +0300 Subject: [PATCH 05/22] [Feature] Adding a char_group tokenizer (#24186) === Char Group Tokenizer The `char_group` tokenizer breaks text into terms whenever it encounters a character which is in a defined set. It is mostly useful for cases where a simple custom tokenization is desired, and the overhead of use of the <> is not acceptable. === Configuration The `char_group` tokenizer accepts one parameter: `tokenize_on_chars`:: A string containing a list of characters to tokenize the string on. Whenever a character from this list is encountered, a new token is started. Also supports escaped values like `\\n` and `\\f`, and in addition `\\s` to represent whitespace, `\\d` to represent digits and `\\w` to represent letters. Defaults to an empty list. 
=== Example output ```The 2 QUICK Brown-Foxes jumped over the lazy dog's bone for $2``` When the configuration `\\s-:<>` is used for `tokenize_on_chars`, the above sentence would produce the following terms: ```[ The, 2, QUICK, Brown, Foxes, jumped, over, the, lazy, dog's, bone, for, $2 ]``` --- docs/reference/analysis/tokenizers.asciidoc | 7 + .../tokenizers/chargroup-tokenizer.asciidoc | 80 +++++++++++ .../common/CharGroupTokenizerFactory.java | 135 ++++++++++++++++++ .../analysis/common/CommonAnalysisPlugin.java | 1 + .../CharGroupTokenizerFactoryTests.java | 74 ++++++++++ 5 files changed, 297 insertions(+) create mode 100644 docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc create mode 100644 modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java create mode 100644 modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java diff --git a/docs/reference/analysis/tokenizers.asciidoc b/docs/reference/analysis/tokenizers.asciidoc index add0abdec0123..d6f15ded05fab 100644 --- a/docs/reference/analysis/tokenizers.asciidoc +++ b/docs/reference/analysis/tokenizers.asciidoc @@ -103,6 +103,11 @@ The `simple_pattern` tokenizer uses a regular expression to capture matching text as terms. It uses a restricted subset of regular expression features and is generally faster than the `pattern` tokenizer. +<>:: + +The `char_group` tokenizer is configurable through sets of characters to split +on, which is usually less expensive than running regular expressions. + <>:: The `simple_pattern_split` tokenizer uses the same restricted regular expression @@ -143,6 +148,8 @@ include::tokenizers/keyword-tokenizer.asciidoc[] include::tokenizers/pattern-tokenizer.asciidoc[] +include::tokenizers/chargroup-tokenizer.asciidoc[] + include::tokenizers/simplepattern-tokenizer.asciidoc[] include::tokenizers/simplepatternsplit-tokenizer.asciidoc[] diff --git a/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc new file mode 100644 index 0000000000000..e6bf79b0e961f --- /dev/null +++ b/docs/reference/analysis/tokenizers/chargroup-tokenizer.asciidoc @@ -0,0 +1,80 @@ +[[analysis-chargroup-tokenizer]] +=== Char Group Tokenizer + +The `char_group` tokenizer breaks text into terms whenever it encounters a +character which is in a defined set. It is mostly useful for cases where a simple +custom tokenization is desired, and the overhead of use of the <> +is not acceptable. + +[float] +=== Configuration + +The `char_group` tokenizer accepts one parameter: + +[horizontal] +`tokenize_on_chars`:: + A list containing a list of characters to tokenize the string on. Whenever a character + from this list is encountered, a new token is started. This accepts either single + characters like eg. `-`, or character groups: `whitespace`, `letter`, `digit`, + `punctuation`, `symbol`. 
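As a complement to the `_analyze` example below, a hypothetical index definition could register the tokenizer under `analysis.tokenizer` and reference it from a custom analyzer (the index, analyzer, and tokenizer names here are placeholders, not part of this patch):

[source,js]
---------------------------
PUT my_index
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "my_char_group_tokenizer"
        }
      },
      "tokenizer": {
        "my_char_group_tokenizer": {
          "type": "char_group",
          "tokenize_on_chars": [
            "whitespace",
            "-"
          ]
        }
      }
    }
  }
}
---------------------------
// CONSOLE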
+ + +[float] +=== Example output + +[source,js] +--------------------------- +POST _analyze +{ + "tokenizer": { + "type": "char_group", + "tokenize_on_chars": [ + "whitespace", + "-", + "\n" + ] + }, + "text": "The QUICK brown-fox" +} +--------------------------- +// CONSOLE + +returns + +[source,js] +--------------------------- +{ + "tokens": [ + { + "token": "The", + "start_offset": 0, + "end_offset": 3, + "type": "word", + "position": 0 + }, + { + "token": "QUICK", + "start_offset": 4, + "end_offset": 9, + "type": "word", + "position": 1 + }, + { + "token": "brown", + "start_offset": 10, + "end_offset": 15, + "type": "word", + "position": 2 + }, + { + "token": "fox", + "start_offset": 16, + "end_offset": 19, + "type": "word", + "position": 3 + } + ] +} +--------------------------- +// TESTRESPONSE + diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java new file mode 100644 index 0000000000000..d4e1e794a309b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactory.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.util.CharTokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenizerFactory; + +import java.util.HashSet; +import java.util.Set; + +public class CharGroupTokenizerFactory extends AbstractTokenizerFactory{ + + private final Set tokenizeOnChars = new HashSet<>(); + private boolean tokenizeOnSpace = false; + private boolean tokenizeOnLetter = false; + private boolean tokenizeOnDigit = false; + private boolean tokenizeOnPunctuation = false; + private boolean tokenizeOnSymbol = false; + + public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + + for (final String c : settings.getAsList("tokenize_on_chars")) { + if (c == null || c.length() == 0) { + throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters"); + } + + if (c.length() == 1) { + tokenizeOnChars.add((int) c.charAt(0)); + } + else if (c.charAt(0) == '\\') { + tokenizeOnChars.add((int) parseEscapedChar(c)); + } else { + switch (c) { + case "letter": + tokenizeOnLetter = true; + break; + case "digit": + tokenizeOnDigit = true; + break; + case "whitespace": + tokenizeOnSpace = true; + break; + case "punctuation": + tokenizeOnPunctuation = true; + break; + case "symbol": + tokenizeOnSymbol = true; + break; + default: + throw new RuntimeException("Invalid escaped char in [" + c + "]"); + } + } + } + } + + private char parseEscapedChar(final String s) { + int len = s.length(); + char c = s.charAt(0); + if (c == '\\') { + if (1 >= len) + throw new RuntimeException("Invalid escaped char in [" + s + "]"); + c = s.charAt(1); + switch (c) { + case '\\': + return '\\'; + case 'n': + return '\n'; + case 't': + return '\t'; + case 'r': + return '\r'; + case 'b': + return '\b'; + case 'f': + return '\f'; + case 'u': + if (len > 6) { + throw new RuntimeException("Invalid escaped char in [" + s + "]"); + } + return (char) Integer.parseInt(s.substring(2), 16); + default: + throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]"); + } + } else { + throw new RuntimeException("Invalid escaped char [" + s + "]"); + } + } + + @Override + public Tokenizer create() { + return new CharTokenizer() { + @Override + protected boolean isTokenChar(int c) { + if (tokenizeOnSpace && Character.isWhitespace(c)) { + return false; + } + if (tokenizeOnLetter && Character.isLetter(c)) { + return false; + } + if (tokenizeOnDigit && Character.isDigit(c)) { + return false; + } + if (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(c)) { + return false; + } + if (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(c)) { + return false; + } + return !tokenizeOnChars.contains(c); + } + }; + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 624194092a02e..02a4197fba94a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -184,6 +184,7 @@ public Map> getTokenizers() { tokenizers.put("ngram", 
NGramTokenizerFactory::new); tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new); tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new); + tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java new file mode 100644 index 0000000000000..1447531aa8731 --- /dev/null +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CharGroupTokenizerFactoryTests.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.Tokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTokenStreamTestCase; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; +import java.io.StringReader; +import java.util.Arrays; + + +public class CharGroupTokenizerFactoryTests extends ESTokenStreamTestCase { + public void testParseTokenChars() { + final Index index = new Index("test", "_na_"); + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings); + final String name = "cg"; + for (String[] conf : Arrays.asList( + new String[] { "\\v" }, + new String[] { "\\u00245" }, + new String[] { "commas" }, + new String[] { "a", "b", "c", "\\$" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", conf).build(); + expectThrows(RuntimeException.class, () -> new CharGroupTokenizerFactory(indexProperties, null, name, settings).create()); + } + + for (String[] conf : Arrays.asList( + new String[0], + new String[] { "\\n" }, + new String[] { "\\u0024" }, + new String[] { "whitespace" }, + new String[] { "a", "b", "c" }, + new String[] { "a", "b", "c", "\\r" }, + new String[] { "\\r" }, + new String[] { "f", "o", "o", "symbol" })) { + final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", Arrays.asList(conf)).build(); + new CharGroupTokenizerFactory(indexProperties, null, name, settings).create(); + // no exception + } + } + + public void testTokenization() throws IOException { + final Index index = new Index("test", "_na_"); + final String name = "cg"; + final Settings indexSettings = newAnalysisSettingsBuilder().build(); + final Settings settings 
= newAnalysisSettingsBuilder().putList("tokenize_on_chars", "whitespace", ":", "\\u0024").build(); + Tokenizer tokenizer = new CharGroupTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), + null, name, settings).create(); + tokenizer.setReader(new StringReader("foo bar $34 test:test2")); + assertTokenStreamContents(tokenizer, new String[] {"foo", "bar", "34", "test", "test2"}); + } +} From 37f67d9e21bd76cc06b2feccbeb3f5ffaf5bd25b Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Tue, 22 May 2018 09:43:45 -0500 Subject: [PATCH 06/22] =?UTF-8?q?[Docs]=C2=A0Fix=20typo=20in=20circuit=20b?= =?UTF-8?q?reaker=20docs=20(#29659)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous description had a part that didn't fit and was probably from a copy/paste of the in flight requests description above. --- docs/reference/modules/indices/circuit_breaker.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index 857f54132cc0a..3df187086bb69 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -76,7 +76,7 @@ memory on a node. The memory usage is based on the content length of the request [float] ==== Accounting requests circuit breaker -The in flight requests circuit breaker allows Elasticsearch to limit the memory +The accounting circuit breaker allows Elasticsearch to limit the memory usage of things held in memory that are not released when a request is completed. This includes things like the Lucene segment memory. From 31251c9a6d317cddc65c526a1cfc56aebc96e11c Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 22 May 2018 09:29:31 -0600 Subject: [PATCH 07/22] Make http pipelining support mandatory (#30695) This is related to #29500 and #28898. This commit removes the abilitiy to disable http pipelining. After this commit, any elasticsearch node will support pipelined requests from a client. Additionally, it extracts some of the http pipelining work to the server module. This extracted work is used to implement pipelining for the nio plugin. 
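To make the ordering requirement concrete before the diff: pipelined responses must be flushed in the order the corresponding requests were read, even though handlers may complete them out of order. A rough, standalone sketch of that bookkeeping (simplified and hypothetical; this is not the actual `HttpPipeliningAggregator` API introduced below) could look like:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

// Simplified sketch of HTTP pipelining ordering: requests are numbered as they
// are read, and a response is only released once every response with a lower
// sequence number has been released. Out-of-order responses wait in a queue.
class PipelineOrderingSketch {

    static final class Response implements Comparable<Response> {
        final int sequence;
        final String body;

        Response(int sequence, String body) {
            this.sequence = sequence;
            this.body = body;
        }

        @Override
        public int compareTo(Response other) {
            return Integer.compare(sequence, other.sequence);
        }
    }

    private final PriorityQueue<Response> outstanding = new PriorityQueue<>();
    private int nextReadSequence = 0;   // assigned to requests as they arrive
    private int nextWriteSequence = 0;  // next sequence allowed to be written

    /** Tag an incoming request with its position in the pipeline. */
    int read() {
        return nextReadSequence++;
    }

    /** Accept a completed response; return every response now ready to flush, in order. */
    List<Response> write(Response response) {
        outstanding.add(response);
        List<Response> ready = new ArrayList<>();
        while (outstanding.isEmpty() == false && outstanding.peek().sequence == nextWriteSequence) {
            ready.add(outstanding.poll());
            nextWriteSequence++;
        }
        return ready;
    }
}
```

The real handler additionally bounds the number of responses it will hold (`http.pipelining.max_events`) and closes the channel when that limit is exceeded.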
--- .../migration/migrate_7_0/settings.asciidoc | 10 +- docs/reference/modules/http.asciidoc | 2 - .../http/netty4/Netty4HttpChannel.java | 39 +-- .../netty4/Netty4HttpPipeliningHandler.java | 102 ++++++ .../http/netty4/Netty4HttpRequestHandler.java | 30 +- .../http/netty4/Netty4HttpResponse.java | 37 +++ .../netty4/Netty4HttpServerTransport.java | 20 +- .../pipelining/HttpPipelinedRequest.java | 88 ----- .../pipelining/HttpPipelinedResponse.java | 94 ------ .../pipelining/HttpPipeliningHandler.java | 144 --------- .../http/netty4/Netty4HttpChannelTests.java | 19 +- .../Netty4HttpPipeliningHandlerTests.java | 57 ++-- .../Netty4HttpServerPipeliningTests.java | 82 +---- ...EnabledIT.java => Netty4PipeliningIT.java} | 12 +- .../http/nio/HttpReadWriteHandler.java | 147 +++++---- .../http/nio/HttpWriteOperation.java | 7 +- .../elasticsearch/http/nio/NettyAdaptor.java | 20 +- .../elasticsearch/http/nio/NettyListener.java | 30 +- .../http/nio/NioHttpChannel.java | 10 +- .../http/nio/NioHttpPipeliningHandler.java | 103 ++++++ .../http/nio/NioHttpResponse.java | 37 +++ .../http/nio/NioHttpServerTransport.java | 15 +- .../org/elasticsearch/NioIntegTestCase.java | 5 +- .../http/nio/HttpReadWriteHandlerTests.java | 11 +- .../nio/NioHttpPipeliningHandlerTests.java | 304 ++++++++++++++++++ .../http/nio/NioPipeliningIT.java | 48 ++- .../common/settings/ClusterSettings.java | 1 - .../http/HttpHandlingSettings.java | 9 +- .../http/HttpPipelinedMessage.java | 37 +++ .../http/HttpPipelinedRequest.java | 33 ++ .../http/HttpPipeliningAggregator.java | 81 +++++ .../http/HttpTransportSettings.java | 2 - .../test/InternalTestCluster.java | 1 - 33 files changed, 990 insertions(+), 647 deletions(-) create mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java create mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java delete mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java delete mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java delete mode 100644 modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java rename modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/{pipelining => }/Netty4HttpPipeliningHandlerTests.java (83%) rename modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/{Netty4PipeliningEnabledIT.java => Netty4PipeliningIT.java} (87%) create mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java create mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java create mode 100644 plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java rename modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java => plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java (53%) create mode 100644 server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java create mode 100644 server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java create mode 100644 server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index d62d7e6065de0..7826afc05fa59 
100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -29,6 +29,14 @@ [[remove-http-enabled]] ==== Http enabled setting removed -The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing +* The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing use of the transport client. This setting has been removed, as the transport client will be removed in the future, thus requiring HTTP to always be enabled. + +[[remove-http-pipelining-setting]] +==== Http pipelining setting removed + +* The setting `http.pipelining` previously allowed disabling HTTP pipelining support. +This setting has been removed, as disabling http pipelining support on the server +provided little value. The setting `http.pipelining.max_events` can still be used to +limit the number of pipelined requests in-flight. diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 7f29a9db7f605..dab8e8136893e 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -96,8 +96,6 @@ and stack traces in response output. Note: When set to `false` and the `error_tr parameter is specified, an error will be returned; when `error_trace` is not specified, a simple message will be returned. Defaults to `true` -|`http.pipelining` |Enable or disable HTTP pipelining, defaults to `true`. - |`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`. |`http.max_warning_header_count` |The maximum number of warning headers in diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 6e39a7f50d2cd..cb31d44454452 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -42,7 +42,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; @@ -59,29 +58,24 @@ final class Netty4HttpChannel extends AbstractRestChannel { private final Netty4HttpServerTransport transport; private final Channel channel; private final FullHttpRequest nettyRequest; - private final HttpPipelinedRequest pipelinedRequest; + private final int sequence; private final ThreadContext threadContext; private final HttpHandlingSettings handlingSettings; /** - * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. - * @param request The request that is handled by this channel. - * @param pipelinedRequest If HTTP pipelining is enabled provide the corresponding pipelined request. May be null if - * HTTP pipelining is disabled. - * @param handlingSettings true iff error messages should include stack traces. - * @param threadContext the thread context for the channel + * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. + * @param request The request that is handled by this channel. 
+ * @param sequence The pipelining sequence number for this request + * @param handlingSettings true if error messages should include stack traces. + * @param threadContext the thread context for the channel */ - Netty4HttpChannel( - final Netty4HttpServerTransport transport, - final Netty4HttpRequest request, - final HttpPipelinedRequest pipelinedRequest, - final HttpHandlingSettings handlingSettings, - final ThreadContext threadContext) { + Netty4HttpChannel(Netty4HttpServerTransport transport, Netty4HttpRequest request, int sequence, HttpHandlingSettings handlingSettings, + ThreadContext threadContext) { super(request, handlingSettings.getDetailedErrorsEnabled()); this.transport = transport; this.channel = request.getChannel(); this.nettyRequest = request.request(); - this.pipelinedRequest = pipelinedRequest; + this.sequence = sequence; this.threadContext = threadContext; this.handlingSettings = handlingSettings; } @@ -129,7 +123,7 @@ public void sendResponse(RestResponse response) { final ChannelPromise promise = channel.newPromise(); if (releaseContent) { - promise.addListener(f -> ((Releasable)content).close()); + promise.addListener(f -> ((Releasable) content).close()); } if (releaseBytesStreamOutput) { @@ -140,13 +134,9 @@ public void sendResponse(RestResponse response) { promise.addListener(ChannelFutureListener.CLOSE); } - final Object msg; - if (pipelinedRequest != null) { - msg = pipelinedRequest.createHttpResponse(resp, promise); - } else { - msg = resp; - } - channel.writeAndFlush(msg, promise); + Netty4HttpResponse newResponse = new Netty4HttpResponse(sequence, resp); + + channel.writeAndFlush(newResponse, promise); releaseContent = false; releaseBytesStreamOutput = false; } finally { @@ -156,9 +146,6 @@ public void sendResponse(RestResponse response) { if (releaseBytesStreamOutput) { bytesOutputOrNull().close(); } - if (pipelinedRequest != null) { - pipelinedRequest.release(); - } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java new file mode 100644 index 0000000000000..e930ffe7424a9 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandler.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.http.LastHttpContent; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.HttpPipeliningAggregator; +import org.elasticsearch.transport.netty4.Netty4Utils; + +import java.nio.channels.ClosedChannelException; +import java.util.Collections; +import java.util.List; + +/** + * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. + */ +public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler { + + private final Logger logger; + private final HttpPipeliningAggregator aggregator; + + /** + * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation. + * + * @param logger for logging unexpected errors + * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is + * required as events cannot queue up indefinitely + */ + public Netty4HttpPipeliningHandler(Logger logger, final int maxEventsHeld) { + this.logger = logger; + this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld); + } + + @Override + public void channelRead(final ChannelHandlerContext ctx, final Object msg) { + if (msg instanceof LastHttpContent) { + HttpPipelinedRequest pipelinedRequest = aggregator.read(((LastHttpContent) msg).retain()); + ctx.fireChannelRead(pipelinedRequest); + } else { + ctx.fireChannelRead(msg); + } + } + + @Override + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { + assert msg instanceof Netty4HttpResponse : "Message must be type: " + Netty4HttpResponse.class; + Netty4HttpResponse response = (Netty4HttpResponse) msg; + boolean success = false; + try { + List> readyResponses = aggregator.write(response, promise); + for (Tuple readyResponse : readyResponses) { + ctx.write(readyResponse.v1().getResponse(), readyResponse.v2()); + } + success = true; + } catch (IllegalStateException e) { + ctx.channel().close(); + } finally { + if (success == false) { + promise.setFailure(new ClosedChannelException()); + } + } + } + + @Override + public void close(ChannelHandlerContext ctx, ChannelPromise promise) { + List> inflightResponses = aggregator.removeAllInflightResponses(); + + if (inflightResponses.isEmpty() == false) { + ClosedChannelException closedChannelException = new ClosedChannelException(); + for (Tuple inflightResponse : inflightResponses) { + try { + inflightResponse.v2().setFailure(closedChannelException); + } catch (RuntimeException e) { + logger.error("unexpected error while releasing pipelined http responses", e); + } + } + } + ctx.close(promise); + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 74429c8dda9b7..c3a010226a408 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -30,41 +30,30 @@ import io.netty.handler.codec.http.HttpHeaders; import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpHandlingSettings; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; import java.util.Collections; @ChannelHandler.Sharable -class Netty4HttpRequestHandler extends SimpleChannelInboundHandler { +class Netty4HttpRequestHandler extends SimpleChannelInboundHandler> { private final Netty4HttpServerTransport serverTransport; private final HttpHandlingSettings handlingSettings; - private final boolean httpPipeliningEnabled; private final ThreadContext threadContext; Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings, ThreadContext threadContext) { this.serverTransport = serverTransport; - this.httpPipeliningEnabled = serverTransport.pipelining; this.handlingSettings = handlingSettings; this.threadContext = threadContext; } @Override - protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { - final FullHttpRequest request; - final HttpPipelinedRequest pipelinedRequest; - if (this.httpPipeliningEnabled && msg instanceof HttpPipelinedRequest) { - pipelinedRequest = (HttpPipelinedRequest) msg; - request = (FullHttpRequest) pipelinedRequest.last(); - } else { - pipelinedRequest = null; - request = (FullHttpRequest) msg; - } + protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest msg) throws Exception { + final FullHttpRequest request = msg.getRequest(); - boolean success = false; try { final FullHttpRequest copy = @@ -111,7 +100,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except Netty4HttpChannel innerChannel; try { innerChannel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, handlingSettings, threadContext); + new Netty4HttpChannel(serverTransport, httpRequest, msg.getSequence(), handlingSettings, threadContext); } catch (final IllegalArgumentException e) { if (badRequestCause == null) { badRequestCause = e; @@ -126,7 +115,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except copy, ctx.channel()); innerChannel = - new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, handlingSettings, threadContext); + new Netty4HttpChannel(serverTransport, innerRequest, msg.getSequence(), handlingSettings, threadContext); } channel = innerChannel; } @@ -138,12 +127,9 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except } else { serverTransport.dispatchRequest(httpRequest, channel); } - success = true; } finally { - // the request is otherwise released in case of dispatch - if (success == false && pipelinedRequest != null) { - pipelinedRequest.release(); - } + // As we have copied the buffer, we can release the request + request.release(); } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java new file mode 100644 index 0000000000000..779c9125a2e42 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpResponse.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.http.HttpPipelinedMessage; + +public class Netty4HttpResponse extends HttpPipelinedMessage { + + private final FullHttpResponse response; + + public Netty4HttpResponse(int sequence, FullHttpResponse response) { + super(sequence); + this.response = response; + } + + public FullHttpResponse getResponse() { + return response; + } +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 8e5bace46aa7e..45e889797bde4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -62,7 +62,6 @@ import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; @@ -99,7 +98,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN; @@ -162,8 +160,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { protected final int workerCount; - protected final boolean pipelining; - protected final int pipeliningMaxEvents; /** @@ -204,6 +200,7 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), Math.toIntExact(maxChunkSize.getBytes()), Math.toIntExact(maxHeaderSize.getBytes()), @@ -211,7 +208,8 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic SETTING_HTTP_RESET_COOKIES.get(settings), 
SETTING_HTTP_COMPRESSION.get(settings), SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + pipeliningMaxEvents); this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); @@ -226,14 +224,12 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt()); - this.pipelining = SETTING_PIPELINING.get(settings); - this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.corsConfig = buildCorsConfig(settings); logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + - "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents, - pipelining, pipeliningMaxEvents); + "receive_predictor[{}], max_composite_buffer_components[{}], pipelining_max_events[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, receivePredictor, maxCompositeBufferComponents, + pipeliningMaxEvents); } public Settings settings() { @@ -452,9 +448,7 @@ protected void initChannel(Channel ch) throws Exception { if (SETTING_CORS_ENABLED.get(transport.settings())) { ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig())); } - if (transport.pipelining) { - ch.pipeline().addLast("pipelining", new HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents)); - } + ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents)); ch.pipeline().addLast("handler", requestHandler); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java deleted file mode 100644 index be1669c60c297..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedRequest.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.http.netty4.pipelining; - -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.LastHttpContent; -import io.netty.util.ReferenceCounted; - -/** - * Permits downstream channel events to be ordered and signalled as to whether more are to come for - * a given sequence. - */ -public class HttpPipelinedRequest implements ReferenceCounted { - - private final LastHttpContent last; - private final int sequence; - - public HttpPipelinedRequest(final LastHttpContent last, final int sequence) { - this.last = last; - this.sequence = sequence; - } - - public LastHttpContent last() { - return last; - } - - public HttpPipelinedResponse createHttpResponse(final FullHttpResponse response, final ChannelPromise promise) { - return new HttpPipelinedResponse(response, promise, sequence); - } - - @Override - public int refCnt() { - return last.refCnt(); - } - - @Override - public ReferenceCounted retain() { - last.retain(); - return this; - } - - @Override - public ReferenceCounted retain(int increment) { - last.retain(increment); - return this; - } - - @Override - public ReferenceCounted touch() { - last.touch(); - return this; - } - - @Override - public ReferenceCounted touch(Object hint) { - last.touch(hint); - return this; - } - - @Override - public boolean release() { - return last.release(); - } - - @Override - public boolean release(int decrement) { - return last.release(decrement); - } - -} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java deleted file mode 100644 index 6b6db94d69a59..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipelinedResponse.java +++ /dev/null @@ -1,94 +0,0 @@ -package org.elasticsearch.http.netty4.pipelining; - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.util.ReferenceCounted; - -class HttpPipelinedResponse implements Comparable, ReferenceCounted { - - private final FullHttpResponse response; - private final ChannelPromise promise; - private final int sequence; - - HttpPipelinedResponse(FullHttpResponse response, ChannelPromise promise, int sequence) { - this.response = response; - this.promise = promise; - this.sequence = sequence; - } - - public FullHttpResponse response() { - return response; - } - - public ChannelPromise promise() { - return promise; - } - - public int sequence() { - return sequence; - } - - @Override - public int compareTo(HttpPipelinedResponse o) { - return Integer.compare(sequence, o.sequence); - } - - @Override - public int refCnt() { - return response.refCnt(); - } - - @Override - public ReferenceCounted retain() { - response.retain(); - return this; - } - - @Override - public ReferenceCounted retain(int increment) { - response.retain(increment); - return this; - } - - @Override - public ReferenceCounted touch() { - response.touch(); - return this; - } - - @Override - public ReferenceCounted touch(Object hint) { - response.touch(hint); - return this; - } - - @Override - public boolean release() { - return response.release(); - } - - @Override - public boolean release(int decrement) { - return response.release(decrement); - } - -} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java deleted file mode 100644 index a90027c81482b..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/pipelining/HttpPipeliningHandler.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.http.netty4.pipelining; - -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.http.LastHttpContent; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.transport.netty4.Netty4Utils; - -import java.nio.channels.ClosedChannelException; -import java.util.Collections; -import java.util.PriorityQueue; - -/** - * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. 
- */ -public class HttpPipeliningHandler extends ChannelDuplexHandler { - - // we use a priority queue so that responses are ordered by their sequence number - private final PriorityQueue holdingQueue; - - private final Logger logger; - private final int maxEventsHeld; - - /* - * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the - * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the - * current write sequence, implying that all preceding messages have been written. - */ - private int readSequence; - private int writeSequence; - - /** - * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation. - * - * @param logger for logging unexpected errors - * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is - * required as events cannot queue up indefinitely - */ - public HttpPipeliningHandler(Logger logger, final int maxEventsHeld) { - this.logger = logger; - this.maxEventsHeld = maxEventsHeld; - this.holdingQueue = new PriorityQueue<>(1); - } - - @Override - public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception { - if (msg instanceof LastHttpContent) { - ctx.fireChannelRead(new HttpPipelinedRequest(((LastHttpContent) msg).retain(), readSequence++)); - } else { - ctx.fireChannelRead(msg); - } - } - - @Override - public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws Exception { - if (msg instanceof HttpPipelinedResponse) { - final HttpPipelinedResponse current = (HttpPipelinedResponse) msg; - /* - * We attach the promise to the response. When we invoke a write on the channel with the response, we must ensure that we invoke - * the write methods that accept the same promise that we have attached to the response otherwise as the response proceeds - * through the handler pipeline a different promise will be used until reaching this handler. Therefore, we assert here that the - * attached promise is identical to the provided promise as a safety mechanism that we are respecting this. - */ - assert current.promise() == promise; - - boolean channelShouldClose = false; - - synchronized (holdingQueue) { - if (holdingQueue.size() < maxEventsHeld) { - holdingQueue.add(current); - - while (!holdingQueue.isEmpty()) { - /* - * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence - * number does not match the current write sequence number then we have not processed all preceding responses yet. - */ - final HttpPipelinedResponse top = holdingQueue.peek(); - if (top.sequence() != writeSequence) { - break; - } - holdingQueue.remove(); - /* - * We must use the promise attached to the response; this is necessary since are going to hold a response until all - * responses that precede it in the pipeline are written first. Note that the promise from the method invocation is - * not ignored, it will already be attached to an existing response and consumed when that response is drained. 
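
Note on the ordering contract described in the comments of this (now deleted) handler: responses are parked in a priority queue keyed by sequence number and only written once every lower sequence number has been written, with a bound on how many responses may be held before the connection is aborted. The sketch below is illustrative only, not code from this patch or from the shared classes it introduces; it captures the same bookkeeping in a standalone class, including a drain step matching the close behaviour that fails any still-queued responses.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Illustrative sketch of the pipelining ordering invariant; not part of the patch.
    final class SequenceOrderingBuffer<T> {

        // responses wait here until every lower sequence number has been emitted
        private final PriorityQueue<Entry<T>> holdingQueue =
            new PriorityQueue<>(Comparator.comparingInt(e -> e.sequence));
        private final int maxEventsHeld;
        private int writeSequence = 0;

        SequenceOrderingBuffer(int maxEventsHeld) {
            this.maxEventsHeld = maxEventsHeld;
        }

        /** Offer a response with its sequence number; returns the responses now ready, in order. */
        List<T> offer(int sequence, T response) {
            if (holdingQueue.size() >= maxEventsHeld) {
                // mirrors the handler aborting the connection when too many events are held
                throw new IllegalStateException("too many pipelined responses held: " + holdingQueue.size());
            }
            holdingQueue.add(new Entry<>(sequence, response));
            List<T> ready = new ArrayList<>();
            while (holdingQueue.isEmpty() == false && holdingQueue.peek().sequence == writeSequence) {
                ready.add(holdingQueue.poll().response);
                writeSequence++;
            }
            return ready;
        }

        /** Remove everything still held, e.g. to fail the associated promises when the channel closes. */
        List<T> drain() {
            List<T> held = new ArrayList<>();
            Entry<T> entry;
            while ((entry = holdingQueue.poll()) != null) {
                held.add(entry.response);
            }
            return held;
        }

        private static final class Entry<T> {
            final int sequence;
            final T response;

            Entry(int sequence, T response) {
                this.sequence = sequence;
                this.response = response;
            }
        }
    }
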
- */ - ctx.write(top.response(), top.promise()); - writeSequence++; - } - } else { - channelShouldClose = true; - } - } - - if (channelShouldClose) { - try { - Netty4Utils.closeChannels(Collections.singletonList(ctx.channel())); - } finally { - current.release(); - promise.setSuccess(); - } - } - } else { - ctx.write(msg, promise); - } - } - - @Override - public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - if (holdingQueue.isEmpty() == false) { - ClosedChannelException closedChannelException = new ClosedChannelException(); - HttpPipelinedResponse pipelinedResponse; - while ((pipelinedResponse = holdingQueue.poll()) != null) { - try { - pipelinedResponse.release(); - pipelinedResponse.promise().setFailure(closedChannelException); - } catch (Exception e) { - logger.error("unexpected error while releasing pipelined http responses", e); - } - } - } - ctx.close(promise); - } -} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index 0ef1ea585b11c..7c5b35a322996 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -60,7 +60,6 @@ import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestResponse; @@ -212,12 +211,12 @@ public void testHeadersSet() { final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; // send a response Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); TestResponse resp = new TestResponse(); final String customHeader = "custom-header"; final String customHeaderValue = "xyz"; @@ -227,7 +226,7 @@ public void testHeadersSet() { // inspect what was written List writtenObjects = writeCapturingChannel.getWrittenObjects(); assertThat(writtenObjects.size(), is(1)); - HttpResponse response = (HttpResponse) writtenObjects.get(0); + HttpResponse response = ((Netty4HttpResponse) writtenObjects.get(0)).getResponse(); assertThat(response.headers().get("non-existent-header"), nullValue()); assertThat(response.headers().get(customHeader), equalTo(customHeaderValue)); assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length()))); @@ -243,10 +242,9 @@ public void testReleaseOnSendToClosedChannel() { final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, 
HttpMethod.GET, "/"); final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); - final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); final TestResponse response = new TestResponse(bigArrays); assertThat(response.content(), instanceOf(Releasable.class)); embeddedChannel.close(); @@ -263,10 +261,9 @@ public void testReleaseOnSendToChannelAfterException() throws IOException { final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); - final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, JsonXContent.contentBuilder().startObject().endObject()); assertThat(response.content(), not(instanceOf(Releasable.class))); @@ -312,7 +309,7 @@ public void testConnectionClose() throws Exception { assertTrue(embeddedChannel.isOpen()); HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); final TestResponse resp = new TestResponse(); channel.sendResponse(resp); assertThat(embeddedChannel.isOpen(), equalTo(!close)); @@ -340,13 +337,13 @@ private FullHttpResponse executeRequest(final Settings settings, final String or HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext()); channel.sendResponse(new TestResponse()); // get the response List writtenObjects = writeCapturingChannel.getWrittenObjects(); assertThat(writtenObjects.size(), is(1)); - return (FullHttpResponse) writtenObjects.get(0); + return ((Netty4HttpResponse) writtenObjects.get(0)).getResponse(); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java similarity index 83% rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java rename to 
modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java index ffb6c8fb3569d..21151304424c1 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/pipelining/Netty4HttpPipeliningHandlerTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpPipeliningHandlerTests.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.http.netty4.pipelining; +package org.elasticsearch.http.netty4; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; @@ -37,6 +37,7 @@ import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http.QueryStringDecoder; import org.elasticsearch.common.Randomness; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.test.ESTestCase; import org.junit.After; @@ -62,7 +63,8 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase { - private final ExecutorService executorService = Executors.newFixedThreadPool(randomIntBetween(4, 8)); + private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8)); + private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1); private final Map waitingRequests = new ConcurrentHashMap<>(); private final Map finishingRequests = new ConcurrentHashMap<>(); @@ -79,15 +81,19 @@ private CountDownLatch finishRequest(String url) { } private void shutdownExecutorService() throws InterruptedException { - if (!executorService.isShutdown()) { - executorService.shutdown(); - executorService.awaitTermination(10, TimeUnit.SECONDS); + if (!handlerService.isShutdown()) { + handlerService.shutdown(); + handlerService.awaitTermination(10, TimeUnit.SECONDS); + } + if (!eventLoopService.isShutdown()) { + eventLoopService.shutdown(); + eventLoopService.awaitTermination(10, TimeUnit.SECONDS); } } public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException { final int numberOfRequests = randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests), + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests), new WorkEmulatorHandler()); for (int i = 0; i < numberOfRequests; i++) { @@ -114,7 +120,7 @@ public void testThatPipeliningWorksWithFastSerializedRequests() throws Interrupt public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException { final int numberOfRequests = randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests), + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests), new WorkEmulatorHandler()); for (int i = 0; i < numberOfRequests; i++) { @@ -147,7 +153,7 @@ public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedExcep final EmbeddedChannel embeddedChannel = new EmbeddedChannel( new AggregateUrisAndHeadersHandler(), - new HttpPipeliningHandler(logger, numberOfRequests), + new Netty4HttpPipeliningHandler(logger, numberOfRequests), new WorkEmulatorHandler()); for (int i = 0; i < numberOfRequests; i++) { @@ -176,7 +182,7 @@ public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedExcep public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException { final int numberOfRequests = 
randomIntBetween(2, 128); - final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests), + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests), new WorkEmulatorHandler()); for (int i = 0; i < 1 + numberOfRequests + 1; i++) { @@ -184,7 +190,7 @@ public void testThatPipeliningClosesConnectionWithTooManyEvents() throws Interru } final List latches = new ArrayList<>(); - final List requests = IntStream.range(1, numberOfRequests + 1).mapToObj(r -> r).collect(Collectors.toList()); + final List requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList()); Randomness.shuffle(requests); for (final Integer request : requests) { @@ -205,25 +211,26 @@ public void testThatPipeliningClosesConnectionWithTooManyEvents() throws Interru public void testPipeliningRequestsAreReleased() throws InterruptedException { final int numberOfRequests = 10; final EmbeddedChannel embeddedChannel = - new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests + 1)); + new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests + 1)); for (int i = 0; i < numberOfRequests; i++) { embeddedChannel.writeInbound(createHttpRequest("/" + i)); } - HttpPipelinedRequest inbound; - ArrayList requests = new ArrayList<>(); + HttpPipelinedRequest inbound; + ArrayList> requests = new ArrayList<>(); while ((inbound = embeddedChannel.readInbound()) != null) { requests.add(inbound); } ArrayList promises = new ArrayList<>(); for (int i = 1; i < requests.size(); ++i) { - final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); + final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); ChannelPromise promise = embeddedChannel.newPromise(); promises.add(promise); - HttpPipelinedResponse response = requests.get(i).createHttpResponse(httpResponse, promise); - embeddedChannel.writeAndFlush(response, promise); + int sequence = requests.get(i).getSequence(); + Netty4HttpResponse resp = new Netty4HttpResponse(sequence, httpResponse); + embeddedChannel.writeAndFlush(resp, promise); } for (ChannelPromise promise : promises) { @@ -260,14 +267,14 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) thro } - private class WorkEmulatorHandler extends SimpleChannelInboundHandler { + private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { @Override - protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelinedRequest pipelinedRequest) throws Exception { + protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { + LastHttpContent request = pipelinedRequest.getRequest(); final QueryStringDecoder decoder; - if (pipelinedRequest.last() instanceof FullHttpRequest) { - final FullHttpRequest fullHttpRequest = (FullHttpRequest) pipelinedRequest.last(); - decoder = new QueryStringDecoder(fullHttpRequest.uri()); + if (request instanceof FullHttpRequest) { + decoder = new QueryStringDecoder(((FullHttpRequest)request).uri()); } else { decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll()); } @@ -282,12 +289,14 @@ protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelined final CountDownLatch finishingLatch = new CountDownLatch(1); finishingRequests.put(uri, finishingLatch); - executorService.submit(() -> { + handlerService.submit(() -> { try { waitingLatch.await(1000, TimeUnit.SECONDS); 
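
The out-of-order completion scenarios these tests exercise with latches and an EmbeddedChannel can also be reproduced against the hypothetical SequenceOrderingBuffer sketched after the netty4 handler above (assuming that sketch is in the same package); responses completing in the order 2, 0, 1 are still released strictly in sequence order.

    // Hypothetical usage of the SequenceOrderingBuffer sketch above; not part of the patch.
    final class OrderingExample {
        public static void main(String[] args) {
            SequenceOrderingBuffer<String> buffer = new SequenceOrderingBuffer<>(16);
            System.out.println(buffer.offer(2, "third"));   // [] - held until 0 and 1 have gone out
            System.out.println(buffer.offer(0, "first"));   // [first]
            System.out.println(buffer.offer(1, "second"));  // [second, third]
            System.out.println(buffer.drain());             // [] - nothing left in flight
        }
    }
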
final ChannelPromise promise = ctx.newPromise(); - ctx.write(pipelinedRequest.createHttpResponse(httpResponse, promise), promise); - finishingLatch.countDown(); + eventLoopService.submit(() -> { + ctx.write(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + finishingLatch.countDown(); + }); } catch (InterruptedException e) { fail(e.toString()); } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 0eb14a8a76e9b..f2b28b909187b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -38,9 +38,9 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.NullDispatcher; -import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -52,16 +52,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.hasSize; /** * This test just tests, if he pipelining works in general with out any connection the Elasticsearch handler @@ -85,9 +80,8 @@ public void shutdown() throws Exception { } } - public void testThatHttpPipeliningWorksWhenEnabled() throws Exception { + public void testThatHttpPipeliningWorks() throws Exception { final Settings settings = Settings.builder() - .put("http.pipelining", true) .put("http.port", "0") .build(); try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { @@ -112,48 +106,6 @@ public void testThatHttpPipeliningWorksWhenEnabled() throws Exception { } } - public void testThatHttpPipeliningCanBeDisabled() throws Exception { - final Settings settings = Settings.builder() - .put("http.pipelining", false) - .put("http.port", "0") - .build(); - try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) { - httpServerTransport.start(); - final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); - - final int numberOfRequests = randomIntBetween(4, 16); - final Set slowIds = new HashSet<>(); - final List requests = new ArrayList<>(numberOfRequests); - for (int i = 0; i < numberOfRequests; i++) { - if (rarely()) { - requests.add("/slow/" + i); - slowIds.add(i); - } else { - requests.add("/" + i); - } - } - - try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { - Collection responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{})); - List responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses)); - // 
we can not be sure about the order of the responses, but the slow ones should come last - assertThat(responseBodies, hasSize(numberOfRequests)); - for (int i = 0; i < numberOfRequests - slowIds.size(); i++) { - assertThat(responseBodies.get(i), matches("/\\d+")); - } - - final Set ids = new HashSet<>(); - for (int i = 0; i < slowIds.size(); i++) { - final String response = responseBodies.get(numberOfRequests - slowIds.size() + i); - assertThat(response, matches("/slow/\\d+" )); - assertTrue(ids.add(Integer.parseInt(response.split("/")[2]))); - } - - assertThat(slowIds, equalTo(ids)); - } - } - } - class CustomNettyHttpServerTransport extends Netty4HttpServerTransport { private final ExecutorService executorService = Executors.newCachedThreadPool(); @@ -196,7 +148,7 @@ protected void initChannel(Channel ch) throws Exception { } - class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler { + class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler> { private final ExecutorService executorService; @@ -205,7 +157,7 @@ class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler { } @Override - protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest msg) throws Exception { executorService.submit(new PossiblySlowRunnable(ctx, msg)); } @@ -220,26 +172,18 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E class PossiblySlowRunnable implements Runnable { private ChannelHandlerContext ctx; - private HttpPipelinedRequest pipelinedRequest; + private HttpPipelinedRequest pipelinedRequest; private FullHttpRequest fullHttpRequest; - PossiblySlowRunnable(ChannelHandlerContext ctx, Object msg) { + PossiblySlowRunnable(ChannelHandlerContext ctx, HttpPipelinedRequest msg) { this.ctx = ctx; - if (msg instanceof HttpPipelinedRequest) { - this.pipelinedRequest = (HttpPipelinedRequest) msg; - } else if (msg instanceof FullHttpRequest) { - this.fullHttpRequest = (FullHttpRequest) msg; - } + this.pipelinedRequest = msg; + this.fullHttpRequest = pipelinedRequest.getRequest(); } @Override public void run() { - final String uri; - if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) { - uri = ((FullHttpRequest) pipelinedRequest.last()).uri(); - } else { - uri = fullHttpRequest.uri(); - } + final String uri = fullHttpRequest.uri(); final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); @@ -258,13 +202,7 @@ public void run() { } final ChannelPromise promise = ctx.newPromise(); - final Object msg; - if (pipelinedRequest != null) { - msg = pipelinedRequest.createHttpResponse(httpResponse, promise); - } else { - msg = httpResponse; - } - ctx.writeAndFlush(msg, promise); + ctx.writeAndFlush(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java similarity index 87% rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java rename to modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java index 9723ee93faf59..ebb91d9663ed5 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningEnabledIT.java +++ 
b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningIT.java @@ -21,8 +21,6 @@ import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -35,21 +33,13 @@ import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase { +public class Netty4PipeliningIT extends ESNetty4IntegTestCase { @Override protected boolean addMockHttpTransport() { return false; // enable http } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("http.pipelining", true) - .build(); - } - public void testThatNettyHttpServerSupportsPipelining() throws Exception { String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"}; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index f1d18ddacbd13..e3481e3c254d2 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -25,20 +25,21 @@ import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpContentCompressor; import io.netty.handler.codec.http.HttpContentDecompressor; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpPipelinedRequest; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; -import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.WriteOperation; import org.elasticsearch.rest.RestRequest; @@ -77,6 +78,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler { if (settings.isCompression()) { handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); } + handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); adaptor.addCloseListener((v, e) -> nioChannel.close()); @@ -95,9 +97,9 @@ public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { @Override public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { - assert message instanceof FullHttpResponse : "This 
channel only supports messages that are of type: " + FullHttpResponse.class - + ". Found type: " + message.getClass() + "."; - return new HttpWriteOperation(context, (FullHttpResponse) message, listener); + assert message instanceof NioHttpResponse : "This channel only supports messages that are of type: " + + NioHttpResponse.class + ". Found type: " + message.getClass() + "."; + return new HttpWriteOperation(context, (NioHttpResponse) message, listener); } @Override @@ -125,76 +127,85 @@ public void close() throws IOException { } } + @SuppressWarnings("unchecked") private void handleRequest(Object msg) { - final FullHttpRequest request = (FullHttpRequest) msg; + final HttpPipelinedRequest pipelinedRequest = (HttpPipelinedRequest) msg; + FullHttpRequest request = pipelinedRequest.getRequest(); - final FullHttpRequest copiedRequest = - new DefaultFullHttpRequest( - request.protocolVersion(), - request.method(), - request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - - Exception badRequestCause = null; - - /* - * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there - * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we - * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, - * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the - * underlying exception that caused us to treat the request as bad. - */ - final NioHttpRequest httpRequest; - { - NioHttpRequest innerHttpRequest; - try { - innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); - } catch (final RestRequest.ContentTypeHeaderException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); - } catch (final RestRequest.BadParameterException e) { - badRequestCause = e; - innerHttpRequest = requestWithoutParameters(copiedRequest); + try { + final FullHttpRequest copiedRequest = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. 
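
The comment above (carried over from the netty4 transport into the nio handler) describes a retry-without-the-offending-input strategy: build the request normally, and if a specific parsing failure occurs, rebuild it without the input that caused it while keeping the cause so the request can still be dispatched as a bad request. As a standalone illustration of that strategy only, using stand-in types rather than the classes touched by this patch:

    // Stand-in types; illustration of the fallback strategy described above, not patch code.
    final class LenientParseExample {

        static final class ContentTypeHeaderException extends RuntimeException {
            ContentTypeHeaderException(String message) { super(message); }
        }

        static final class ParsedRequest {
            final String uri;
            final String contentType;        // null when the header had to be dropped
            final Exception badRequestCause; // null when parsing succeeded cleanly

            ParsedRequest(String uri, String contentType, Exception badRequestCause) {
                this.uri = uri;
                this.contentType = contentType;
                this.badRequestCause = badRequestCause;
            }
        }

        static ParsedRequest parse(String uri, String contentTypeHeader) {
            try {
                return new ParsedRequest(uri, validated(contentTypeHeader), null);
            } catch (ContentTypeHeaderException e) {
                // retry without the input that caused the failure, but remember the cause so the
                // request can still be dispatched and flagged as a bad request downstream
                return new ParsedRequest(uri, null, e);
            }
        }

        private static String validated(String contentTypeHeader) {
            if (contentTypeHeader != null && contentTypeHeader.trim().isEmpty()) {
                throw new ContentTypeHeaderException("invalid Content-Type header");
            }
            return contentTypeHeader;
        }
    }
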
+ */ + final NioHttpRequest httpRequest; + { + NioHttpRequest innerHttpRequest; + try { + innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copiedRequest); + } + httpRequest = innerHttpRequest; } - httpRequest = innerHttpRequest; - } - /* - * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid - * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an - * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these - * parameter values. - */ - final NioHttpChannel channel; - { - NioHttpChannel innerChannel; - try { - innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext); - } catch (final IllegalArgumentException e) { - if (badRequestCause == null) { - badRequestCause = e; - } else { - badRequestCause.addSuppressed(e); + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of + * these parameter values. + */ + final NioHttpChannel channel; + { + NioHttpChannel innerChannel; + int sequence = pipelinedRequest.getSequence(); + BigArrays bigArrays = transport.getBigArrays(); + try { + innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final NioHttpRequest innerRequest = + new NioHttpRequest( + xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copiedRequest.uri(), + copiedRequest); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext); } - final NioHttpRequest innerRequest = - new NioHttpRequest( - xContentRegistry, - Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters - copiedRequest.uri(), - copiedRequest); - innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext); + channel = innerChannel; } - channel = innerChannel; - } - if (request.decoderResult().isFailure()) { - transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } else if (badRequestCause != null) { - transport.dispatchBadRequest(httpRequest, channel, badRequestCause); - } else { - transport.dispatchRequest(httpRequest, channel); + if (request.decoderResult().isFailure()) { + transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + transport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + transport.dispatchRequest(httpRequest, channel); + } + } finally { + // As we have copied the buffer, we can release 
the request + request.release(); } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java index c838ae85e9d40..8ddce7a5b73b5 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java @@ -19,7 +19,6 @@ package org.elasticsearch.http.nio; -import io.netty.handler.codec.http.FullHttpResponse; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.WriteOperation; @@ -28,10 +27,10 @@ public class HttpWriteOperation implements WriteOperation { private final SocketChannelContext channelContext; - private final FullHttpResponse response; + private final NioHttpResponse response; private final BiConsumer listener; - HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer listener) { + HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer listener) { this.channelContext = channelContext; this.response = response; this.listener = listener; @@ -48,7 +47,7 @@ public SocketChannelContext getChannel() { } @Override - public FullHttpResponse getObject() { + public NioHttpResponse getObject() { return response; } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java index 3344a31264121..cf8c92bff905c 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -53,12 +53,7 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) try { ByteBuf message = (ByteBuf) msg; promise.addListener((f) -> message.release()); - NettyListener listener; - if (promise instanceof NettyListener) { - listener = (NettyListener) promise; - } else { - listener = new NettyListener(promise); - } + NettyListener listener = NettyListener.fromChannelPromise(promise); flushOperations.add(new FlushOperation(message.nioBuffers(), listener)); } catch (Exception e) { promise.setFailure(e); @@ -107,18 +102,7 @@ public Object pollInboundMessage() { } public void write(WriteOperation writeOperation) { - ChannelPromise channelPromise = nettyChannel.newPromise(); - channelPromise.addListener(f -> { - BiConsumer consumer = writeOperation.getListener(); - if (f.cause() == null) { - consumer.accept(null, null); - } else { - ExceptionsHelper.dieOnError(f.cause()); - consumer.accept(null, f.cause()); - } - }); - - nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise)); + nettyChannel.writeAndFlush(writeOperation.getObject(), NettyListener.fromBiConsumer(writeOperation.getListener(), nettyChannel)); } public FlushOperation pollOutboundOperation() { diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java index e806b0d23ce3a..b907c0f2bc6f6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -23,7 +23,7 @@ import io.netty.channel.ChannelPromise; import io.netty.util.concurrent.Future; import 
io.netty.util.concurrent.GenericFutureListener; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.util.concurrent.FutureUtils; import java.util.concurrent.ExecutionException; @@ -40,7 +40,7 @@ public class NettyListener implements BiConsumer, ChannelPromis private final ChannelPromise promise; - NettyListener(ChannelPromise promise) { + private NettyListener(ChannelPromise promise) { this.promise = promise; } @@ -211,4 +211,30 @@ public boolean isVoid() { public ChannelPromise unvoid() { return promise.unvoid(); } + + public static NettyListener fromBiConsumer(BiConsumer biConsumer, Channel channel) { + if (biConsumer instanceof NettyListener) { + return (NettyListener) biConsumer; + } else { + ChannelPromise channelPromise = channel.newPromise(); + channelPromise.addListener(f -> { + if (f.cause() == null) { + biConsumer.accept(null, null); + } else { + ExceptionsHelper.dieOnError(f.cause()); + biConsumer.accept(null, f.cause()); + } + }); + + return new NettyListener(channelPromise); + } + } + + public static NettyListener fromChannelPromise(ChannelPromise channelPromise) { + if (channelPromise instanceof NettyListener) { + return (NettyListener) channelPromise; + } else { + return new NettyListener(channelPromise); + } + } } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java index 672c6d5abad0e..97eba20a16f16 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -52,20 +52,23 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.BiConsumer; public class NioHttpChannel extends AbstractRestChannel { private final BigArrays bigArrays; + private final int sequence; private final ThreadContext threadContext; private final FullHttpRequest nettyRequest; private final NioSocketChannel nioChannel; private final boolean resetCookies; - NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, + NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence, HttpHandlingSettings settings, ThreadContext threadContext) { super(request, settings.getDetailedErrorsEnabled()); this.nioChannel = nioChannel; this.bigArrays = bigArrays; + this.sequence = sequence; this.threadContext = threadContext; this.nettyRequest = request.getRequest(); this.resetCookies = settings.isResetCookies(); @@ -117,9 +120,8 @@ public void sendResponse(RestResponse response) { toClose.add(nioChannel::close); } - nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> { - Releasables.close(toClose); - }); + BiConsumer listener = (aVoid, throwable) -> Releasables.close(toClose); + nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener); success = true; } finally { if (success == false) { diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java new file mode 100644 index 0000000000000..57a14e7819d4e --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpPipeliningHandler.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.handler.codec.http.LastHttpContent; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.HttpPipeliningAggregator; +import org.elasticsearch.http.nio.NettyListener; +import org.elasticsearch.http.nio.NioHttpResponse; + +import java.nio.channels.ClosedChannelException; +import java.util.List; + +/** + * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests. + */ +public class NioHttpPipeliningHandler extends ChannelDuplexHandler { + + private final Logger logger; + private final HttpPipeliningAggregator aggregator; + + /** + * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation. + * + * @param logger for logging unexpected errors + * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is + * required as events cannot queue up indefinitely + */ + public NioHttpPipeliningHandler(Logger logger, final int maxEventsHeld) { + this.logger = logger; + this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld); + } + + @Override + public void channelRead(final ChannelHandlerContext ctx, final Object msg) { + if (msg instanceof LastHttpContent) { + HttpPipelinedRequest pipelinedRequest = aggregator.read(((LastHttpContent) msg).retain()); + ctx.fireChannelRead(pipelinedRequest); + } else { + ctx.fireChannelRead(msg); + } + } + + @Override + public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) { + assert msg instanceof NioHttpResponse : "Message must be type: " + NioHttpResponse.class; + NioHttpResponse response = (NioHttpResponse) msg; + boolean success = false; + try { + NettyListener listener = NettyListener.fromChannelPromise(promise); + List> readyResponses = aggregator.write(response, listener); + success = true; + for (Tuple responseToWrite : readyResponses) { + ctx.write(responseToWrite.v1().getResponse(), responseToWrite.v2()); + } + } catch (IllegalStateException e) { + ctx.channel().close(); + } finally { + if (success == false) { + promise.setFailure(new ClosedChannelException()); + } + } + } + + @Override + public void close(ChannelHandlerContext ctx, ChannelPromise promise) { + List> inflightResponses = aggregator.removeAllInflightResponses(); + + if (inflightResponses.isEmpty() == false) { + ClosedChannelException closedChannelException = new ClosedChannelException(); + for (Tuple inflightResponse : 
inflightResponses) { + try { + inflightResponse.v2().setFailure(closedChannelException); + } catch (RuntimeException e) { + logger.error("unexpected error while releasing pipelined http responses", e); + } + } + } + ctx.close(promise); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java new file mode 100644 index 0000000000000..4b634994b4557 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpResponse.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.http.HttpPipelinedMessage; + +public class NioHttpResponse extends HttpPipelinedMessage { + + private final FullHttpResponse response; + + public NioHttpResponse(int sequence, FullHttpResponse response) { + super(sequence); + this.response = response; + } + + public FullHttpResponse getResponse() { + return response; + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index 06f581d7ab701..825a023bd51bc 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -20,6 +20,7 @@ package org.elasticsearch.http.nio; import io.netty.handler.timeout.ReadTimeoutException; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; @@ -84,6 +85,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; public class NioHttpServerTransport extends AbstractHttpServerTransport { @@ -124,6 +126,7 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.httpHandlingSettings = new 
HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), Math.toIntExact(maxChunkSize.getBytes()), Math.toIntExact(maxHeaderSize.getBytes()), @@ -131,7 +134,8 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, SETTING_HTTP_RESET_COOKIES.get(settings), SETTING_HTTP_COMPRESSION.get(settings), SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + pipeliningMaxEvents); this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); @@ -140,14 +144,19 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); - logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]", - maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength); + logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," + + " pipelining_max_events[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents); } BigArrays getBigArrays() { return bigArrays; } + public Logger getLogger() { + return logger; + } + @Override protected void doStart() { boolean success = false; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java index e0c8bacca1d85..703f7acbf8257 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/NioIntegTestCase.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.http.nio.NioHttpServerTransport; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.nio.NioTransport; @@ -43,11 +44,13 @@ protected boolean addMockTransportService() { @Override protected Settings nodeSettings(int nodeOrdinal) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal)); - // randomize netty settings + // randomize nio settings if (randomBoolean()) { builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1); + builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1); } builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME); + builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME); return builder.build(); } diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index dce8319d2fc82..cc8eeb77cc2f6 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -61,11 +61,11 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import 
static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.verifyZeroInteractions; public class HttpReadWriteHandlerTests extends ESTestCase { @@ -91,7 +91,8 @@ public void setMocks() { SETTING_HTTP_RESET_COOKIES.getDefault(settings), SETTING_HTTP_COMPRESSION.getDefault(settings), SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings)); + SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings), + SETTING_PIPELINING_MAX_EVENTS.getDefault(settings)); ThreadContext threadContext = new ThreadContext(settings); nioSocketChannel = mock(NioSocketChannel.class); handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext); @@ -148,7 +149,8 @@ public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() t handler.consumeReads(toChannelBuffer(buf)); - verifyZeroInteractions(transport); + verify(transport, times(0)).dispatchBadRequest(any(), any(), any()); + verify(transport, times(0)).dispatchRequest(any(), any()); List flushOperations = handler.pollFlushOperations(); assertFalse(flushOperations.isEmpty()); @@ -169,9 +171,10 @@ public void testEncodeHttpResponse() throws IOException { prepareHandlerForResponse(handler); FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + NioHttpResponse pipelinedResponse = new NioHttpResponse(0, fullHttpResponse); SocketChannelContext context = mock(SocketChannelContext.class); - HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class)); + HttpWriteOperation writeOperation = new HttpWriteOperation(context, pipelinedResponse, mock(BiConsumer.class)); List flushOperations = handler.writeToBytes(writeOperation); HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite())); diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java new file mode 100644 index 0000000000000..d12c608aeca2a --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -0,0 +1,304 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.LastHttpContent; +import io.netty.handler.codec.http.QueryStringDecoder; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; + +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.LinkedTransferQueue; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.hamcrest.core.Is.is; + +public class NioHttpPipeliningHandlerTests extends ESTestCase { + + private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8)); + private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1); + private final Map waitingRequests = new ConcurrentHashMap<>(); + private final Map finishingRequests = new ConcurrentHashMap<>(); + + @After + public void cleanup() throws Exception { + waitingRequests.keySet().forEach(this::finishRequest); + shutdownExecutorService(); + } + + private CountDownLatch finishRequest(String url) { + waitingRequests.get(url).countDown(); + return finishingRequests.get(url); + } + + private void shutdownExecutorService() throws InterruptedException { + if (!handlerService.isShutdown()) { + handlerService.shutdown(); + handlerService.awaitTermination(10, TimeUnit.SECONDS); + } + if (!eventLoopService.isShutdown()) { + eventLoopService.shutdown(); + eventLoopService.awaitTermination(10, TimeUnit.SECONDS); + } + } + + public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); + } + + final List latches = new ArrayList<>(); + for (final String url : waitingRequests.keySet()) { + latches.add(finishRequest(url)); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; 
i++) { + assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i))); + } + + // random order execution + final List urls = new ArrayList<>(waitingRequests.keySet()); + Randomness.shuffle(urls); + final List latches = new ArrayList<>(); + for (final String url : urls) { + latches.add(finishRequest(url)); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; i++) { + assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = + new EmbeddedChannel( + new AggregateUrisAndHeadersHandler(), + new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < numberOfRequests; i++) { + final DefaultHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/" + i); + embeddedChannel.writeInbound(request); + embeddedChannel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT); + } + + final List latches = new ArrayList<>(); + for (int i = numberOfRequests - 1; i >= 0; i--) { + latches.add(finishRequest(Integer.toString(i))); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + embeddedChannel.flush(); + + for (int i = 0; i < numberOfRequests; i++) { + assertReadHttpMessageHasContent(embeddedChannel, Integer.toString(i)); + } + + assertTrue(embeddedChannel.isOpen()); + } + + public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException { + final int numberOfRequests = randomIntBetween(2, 128); + final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests), + new WorkEmulatorHandler()); + + for (int i = 0; i < 1 + numberOfRequests + 1; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + Integer.toString(i))); + } + + final List latches = new ArrayList<>(); + final List requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList()); + Randomness.shuffle(requests); + + for (final Integer request : requests) { + latches.add(finishRequest(request.toString())); + } + + for (final CountDownLatch latch : latches) { + latch.await(); + } + + finishRequest(Integer.toString(numberOfRequests + 1)).await(); + + embeddedChannel.flush(); + + assertFalse(embeddedChannel.isOpen()); + } + + public void testPipeliningRequestsAreReleased() throws InterruptedException { + final int numberOfRequests = 10; + final EmbeddedChannel embeddedChannel = + new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1)); + + for (int i = 0; i < numberOfRequests; i++) { + embeddedChannel.writeInbound(createHttpRequest("/" + i)); + } + + HttpPipelinedRequest inbound; + ArrayList> requests = new ArrayList<>(); + while ((inbound = embeddedChannel.readInbound()) != null) { + 
requests.add(inbound); + } + + ArrayList promises = new ArrayList<>(); + for (int i = 1; i < requests.size(); ++i) { + final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK); + ChannelPromise promise = embeddedChannel.newPromise(); + promises.add(promise); + int sequence = requests.get(i).getSequence(); + NioHttpResponse resp = new NioHttpResponse(sequence, httpResponse); + embeddedChannel.writeAndFlush(resp, promise); + } + + for (ChannelPromise promise : promises) { + assertFalse(promise.isDone()); + } + embeddedChannel.close().syncUninterruptibly(); + for (ChannelPromise promise : promises) { + assertTrue(promise.isDone()); + assertTrue(promise.cause() instanceof ClosedChannelException); + } + } + + private void assertReadHttpMessageHasContent(EmbeddedChannel embeddedChannel, String expectedContent) { + FullHttpResponse response = (FullHttpResponse) embeddedChannel.outboundMessages().poll(); + assertNotNull("Expected response to exist, maybe you did not wait long enough?", response); + assertNotNull("Expected response to have content " + expectedContent, response.content()); + String data = new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8); + assertThat(data, is(expectedContent)); + } + + private FullHttpRequest createHttpRequest(String uri) { + return new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uri); + } + + private static class AggregateUrisAndHeadersHandler extends SimpleChannelInboundHandler { + + static final Queue QUEUE_URI = new LinkedTransferQueue<>(); + + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) throws Exception { + QUEUE_URI.add(request.uri()); + } + + } + + private class WorkEmulatorHandler extends SimpleChannelInboundHandler> { + + @Override + protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest pipelinedRequest) { + LastHttpContent request = pipelinedRequest.getRequest(); + final QueryStringDecoder decoder; + if (request instanceof FullHttpRequest) { + decoder = new QueryStringDecoder(((FullHttpRequest)request).uri()); + } else { + decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll()); + } + + final String uri = decoder.path().replace("/", ""); + final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8); + final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK, content); + httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes()); + + final CountDownLatch waitingLatch = new CountDownLatch(1); + waitingRequests.put(uri, waitingLatch); + final CountDownLatch finishingLatch = new CountDownLatch(1); + finishingRequests.put(uri, finishingLatch); + + handlerService.submit(() -> { + try { + waitingLatch.await(1000, TimeUnit.SECONDS); + final ChannelPromise promise = ctx.newPromise(); + eventLoopService.submit(() -> { + ctx.write(new NioHttpResponse(pipelinedRequest.getSequence(), httpResponse), promise); + finishingLatch.countDown(); + }); + } catch (InterruptedException e) { + fail(e.toString()); + } + }); + } + } +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java similarity index 53% rename from modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java rename to plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java index 
af0e7c85a8f63..074aafd6eab4b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4PipeliningDisabledIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioPipeliningIT.java @@ -16,65 +16,53 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.http.netty4; + +package org.elasticsearch.http.nio; import io.netty.handler.codec.http.FullHttpResponse; -import org.elasticsearch.ESNetty4IntegTestCase; -import org.elasticsearch.common.network.NetworkModule; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.NioIntegTestCase; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; import java.util.Collection; -import java.util.List; import java.util.Locale; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1) -public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase { +public class NioPipeliningIT extends NioIntegTestCase { @Override protected boolean addMockHttpTransport() { return false; // enable http } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("http.pipelining", false) - .build(); - } - - public void testThatNettyHttpServerDoesNotSupportPipelining() throws Exception { - ensureGreen(); - String[] requests = new String[] {"/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/"}; + public void testThatNioHttpServerSupportsPipelining() throws Exception { + String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"}; HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class); TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses(); - TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses); + TransportAddress transportAddress = randomFrom(boundAddresses); try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { Collection responses = nettyHttpClient.get(transportAddress.address(), requests); - assertThat(responses, hasSize(requests.length)); - - List opaqueIds = new ArrayList<>(Netty4HttpClient.returnOpaqueIds(responses)); + assertThat(responses, hasSize(5)); - assertResponsesOutOfOrder(opaqueIds); + Collection opaqueIds = Netty4HttpClient.returnOpaqueIds(responses); + assertOpaqueIdsInOrder(opaqueIds); } } - /** - * checks if all responses are there, but also tests that they are out of order because pipelining is disabled - */ - private void assertResponsesOutOfOrder(List opaqueIds) { - String message = String.format(Locale.ROOT, "Expected returned http message ids to be in any order of: %s", opaqueIds); - assertThat(message, opaqueIds, containsInAnyOrder("0", "1", "2", "3", "4", "5", "6")); + private void assertOpaqueIdsInOrder(Collection opaqueIds) { + // check if opaque ids are monotonically increasing + int i = 0; + String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds); + for (String opaqueId : opaqueIds) { + assertThat(msg, opaqueId, is(String.valueOf(i++))); + } } } diff 
--git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index c19cbe4687ce6..d9cf0f630c0f2 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -227,7 +227,6 @@ public void apply(Settings value, Settings current, Settings previous) { HttpTransportSettings.SETTING_CORS_ENABLED, HttpTransportSettings.SETTING_CORS_MAX_AGE, HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED, - HttpTransportSettings.SETTING_PIPELINING, HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN, HttpTransportSettings.SETTING_HTTP_HOST, HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST, diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index f86049292f3fd..df038e8303edb 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -29,9 +29,11 @@ public class HttpHandlingSettings { private final boolean compression; private final int compressionLevel; private final boolean detailedErrorsEnabled; + private final int pipeliningMaxEvents; public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, - boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled) { + boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, + int pipeliningMaxEvents) { this.maxContentLength = maxContentLength; this.maxChunkSize = maxChunkSize; this.maxHeaderSize = maxHeaderSize; @@ -40,6 +42,7 @@ public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeade this.compression = compression; this.compressionLevel = compressionLevel; this.detailedErrorsEnabled = detailedErrorsEnabled; + this.pipeliningMaxEvents = pipeliningMaxEvents; } public int getMaxContentLength() { @@ -73,4 +76,8 @@ public int getCompressionLevel() { public boolean getDetailedErrorsEnabled() { return detailedErrorsEnabled; } + + public int getPipeliningMaxEvents() { + return pipeliningMaxEvents; + } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java new file mode 100644 index 0000000000000..7db8666e73ae3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedMessage.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.http; + +public class HttpPipelinedMessage implements Comparable { + + private final int sequence; + + public HttpPipelinedMessage(int sequence) { + this.sequence = sequence; + } + + public int getSequence() { + return sequence; + } + + @Override + public int compareTo(HttpPipelinedMessage o) { + return Integer.compare(sequence, o.sequence); + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java new file mode 100644 index 0000000000000..df8bd7ee1eb80 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipelinedRequest.java @@ -0,0 +1,33 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http; + +public class HttpPipelinedRequest extends HttpPipelinedMessage { + + private final R request; + + HttpPipelinedRequest(int sequence, R request) { + super(sequence); + this.request = request; + } + + public R getRequest() { + return request; + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java b/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java new file mode 100644 index 0000000000000..f38e9677979db --- /dev/null +++ b/server/src/main/java/org/elasticsearch/http/HttpPipeliningAggregator.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http; + +import org.elasticsearch.common.collect.Tuple; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.PriorityQueue; + +public class HttpPipeliningAggregator { + + private final int maxEventsHeld; + private final PriorityQueue> outboundHoldingQueue; + /* + * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the + * channel, and then transferred to responses. 
A response is not written to the channel context until its sequence number matches the + * current write sequence, implying that all preceding messages have been written. + */ + private int readSequence; + private int writeSequence; + + public HttpPipeliningAggregator(int maxEventsHeld) { + this.maxEventsHeld = maxEventsHeld; + this.outboundHoldingQueue = new PriorityQueue<>(1, Comparator.comparing(Tuple::v1)); + } + + public HttpPipelinedRequest read(final Request request) { + return new HttpPipelinedRequest<>(readSequence++, request); + } + + public List> write(final Response response, Listener listener) { + if (outboundHoldingQueue.size() < maxEventsHeld) { + ArrayList> readyResponses = new ArrayList<>(); + outboundHoldingQueue.add(new Tuple<>(response, listener)); + while (!outboundHoldingQueue.isEmpty()) { + /* + * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence + * number does not match the current write sequence number then we have not processed all preceding responses yet. + */ + final Tuple top = outboundHoldingQueue.peek(); + + if (top.v1().getSequence() != writeSequence) { + break; + } + outboundHoldingQueue.poll(); + readyResponses.add(top); + writeSequence++; + } + + return readyResponses; + } else { + int eventCount = outboundHoldingQueue.size() + 1; + throw new IllegalStateException("Too many pipelined events [" + eventCount + "]. Max events allowed [" + + maxEventsHeld + "]."); + } + } + + public List> removeAllInflightResponses() { + ArrayList> responses = new ArrayList<>(outboundHoldingQueue); + outboundHoldingQueue.clear(); + return responses; + } +} diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 98451e0c304b9..4670137d09a54 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -49,8 +49,6 @@ public final class HttpTransportSettings { new Setting<>("http.cors.allow-headers", "X-Requested-With,Content-Type,Content-Length", (value) -> value, Property.NodeScope); public static final Setting SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope); - public static final Setting SETTING_PIPELINING = - Setting.boolSetting("http.pipelining", true, Property.NodeScope); public static final Setting SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope); public static final Setting SETTING_HTTP_COMPRESSION = diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 5099fc0540de2..b945f7d84eb95 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -300,7 +300,6 @@ public InternalTestCluster(long clusterSeed, Path baseDir, builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); builder.put(TcpTransport.PORT.getKey(), 0); builder.put("http.port", 0); - builder.put("http.pipelining", enableHttpPipelining); if (Strings.hasLength(System.getProperty("tests.es.logger.level"))) { builder.put("logger.level", System.getProperty("tests.es.logger.level")); } From 1094ec09175fe3df264c09d76fce9f239ef71824 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: 
Tue, 22 May 2018 08:33:14 -0700 Subject: [PATCH 08/22] [DOCS] Remove X-Pack references from SQL CLI (#30694) --- x-pack/docs/en/sql/endpoints/cli.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/en/sql/endpoints/cli.asciidoc b/x-pack/docs/en/sql/endpoints/cli.asciidoc index 8f217b61e452a..edbb1dcace4f1 100644 --- a/x-pack/docs/en/sql/endpoints/cli.asciidoc +++ b/x-pack/docs/en/sql/endpoints/cli.asciidoc @@ -2,7 +2,7 @@ [[sql-cli]] == SQL CLI -X-Pack ships with a script to run the SQL CLI in its bin directory: +Elasticsearch ships with a script to run the SQL CLI in its `bin` directory: [source,bash] -------------------------------------------------- @@ -11,7 +11,7 @@ $ ./bin/elasticsearch-sql-cli The jar containing the SQL CLI is a stand alone Java application and the scripts just launch it. You can move it around to other machines -without having to install Elasticsearch or X-Pack on them. +without having to install Elasticsearch on them. You can pass the URL of the Elasticsearch instance to connect to as the first parameter: From 9ffeb171e0c460449e132840c29ae598a36fafb5 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 22 May 2018 08:50:35 -0700 Subject: [PATCH 09/22] [DOCS] Add SAML configuration information (#30548) --- .../configuring-saml-realm.asciidoc | 225 ++++++++++++++++++ .../authentication/saml-guide.asciidoc | 2 + .../docs/en/security/configuring-es.asciidoc | 2 + 3 files changed, 229 insertions(+) create mode 100644 x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc diff --git a/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc new file mode 100644 index 0000000000000..cbcbeebb359ef --- /dev/null +++ b/x-pack/docs/en/security/authentication/configuring-saml-realm.asciidoc @@ -0,0 +1,225 @@ +[role="xpack"] +[[configuring-saml-realm]] +=== Configuring a SAML realm + +The {stack} supports Security Assertion Markup Language Single Sign On (SAML SSO) +into {kib} with {es} as a backend service. In particular, the {stack} supports +the SAML 2.0 Web Browser SSO and the SAML 2.0 Single Logout profiles. It can +integrate with any identity provider (IdP) that supports at least the SAML 2.0 +Web Browser SSO Profile. + +In SAML terminology, the {stack} is operating as a _service provider_ (SP). For more +information, see {stack-ov}/saml-realm.html[SAML authentication] and +{stack-ov}/saml-guide.html[Configuring SAML SSO on the {stack}]. + +[NOTE] +-- + +* If you configure a SAML realm for use in {kib}, you should also configure +another realm, such as the native realm in your authentication chain. +* These instructions assume that you have an existing SAML identity provider. +-- + +To enable SAML authentication in {es} and add the {stack} as a service provider: + +. Enable SSL/TLS for HTTP. ++ +-- +If your {es} cluster is operating in production mode, you must +configure the HTTP interface to use TLS before you can enable SAML +authentication. + +See <>. +-- + +. Enable the Token Service. ++ +-- +The {es} SAML implementation makes use of the {es} Token Service. This service +is automatically enabled if you configure TLS on the HTTP interface. You can +explicitly enable it by including the following setting in your +`elasticsearch.yml` file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.token.enabled: true +------------------------------------------------------------ +-- + +. 
Configure a SAML IdP metadata file. ++ +-- +The {stack} uses a standard SAML metadata document in XML format, which defines +the capabilities and features of your identity provider. You should be able to +download or generate such a document within your IdP administration interface. + +Most IdPs will provide an appropriate metadata file with all the features that +the {stack} requires. For more information, see +{stack-ov}/saml-guide-idp.html[The identity provider]. +-- + +.. Download the IdP metadata document and store it within the `config` directory +on each {es} node. For example, store it as `config/saml/idp-metadata.xml`. + +.. Get the identifier for your identity provider. ++ +-- +The IdP will have been assigned an identifier (_EntityID_ in SAML terminology), +which is most commonly expressed in Uniform Resource Identifier (URI) form. Your +admin interface might tell you what this is or you might need to read the +metadata document to find it. Look for the `entityID` attribute on the +`EntityDescriptor` element. +-- + +. Create one or more SAML realms. ++ +-- +SAML authentication is enabled by configuring a SAML realm within the +authentication chain for {es}. + +This realm has a few mandatory settings, and a number of optional settings. +The available settings are described in detail in the +<>. The following settings (in the `elasticsearch.yml` +configuration file) are the most common settings: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: <1> + type: saml <2> + order: 2 <3> + idp.metadata.path: saml/idp-metadata.xml <4> + idp.entity_id: "https://sso.example.com/" <5> + sp.entity_id: "https://kibana.example.com/" <6> + sp.acs: "https://kibana.example.com/api/security/v1/saml" <7> + sp.logout: "https://kibana.example.com/logout" <8> +------------------------------------------------------------ +<1> This setting defines a new authentication realm named "saml1". For an +introduction to realms, see {stack-ov}/realms.html[Realms]. +<2> The `type` must be `saml`. +<3> You should define a unique order on each realm in your authentication chain. +It is recommended that the SAML realm be at the bottom of your authentication +chain (that is, it has the _highest_ order). +<4> This is the path to the metadata file that you saved for your identity provider. +The path that you enter here is relative to your `config/` directory. {security} +automatically monitors this file for changes and reloads the configuration +whenever it is updated. +<5> This is the identifier (SAML EntityID) that your IdP uses. It should match +the `entityID` attribute within the metadata file. +<6> This is a unique identifier for your {kib} instance, expressed as a URI. +You will use this value when you add {kib} as a service provider within your IdP. +We recommend that you use the base URL for your {kib} instance as the entity ID. +<7> The Assertion Consumer Service (ACS) endpoint is the URL within {kib} that +accepts authentication messages from the IdP. This ACS endpoint supports the +SAML HTTP-POST binding only. It must be a URL that is accessible from the web +browser of the user who is attempting to login to {kib}; it does not need to be +directly accessible by {es} or the IdP. The correct value can vary depending on +how you have installed {kib} and whether there are any proxies involved, but it +is typically +$\{kibana-url}/api/security/v1/saml+ where _$\{kibana-url}_ is the +base URL for your {kib} instance. 
+<8> This is the URL within {kib} that accepts logout messages from the IdP. +Like the `sp.acs` URL, it must be accessible from the web browser, but does +not need to be directly accessible by {es} or the IdP. The correct value can +vary depending on how you have installed {kib} and whether there are any +proxies involved, but it will typically be +$\{kibana-url}/logout+ where +_$\{kibana-url}_ is the base URL for your {kib} instance. + +IMPORTANT: SAML is used when authenticating via {kib}, but it is not an +effective means of authenticating directly to the {es} REST API. For this reason +we recommend that you include at least one additional realm such as the +native realm in your authentication chain for use by API clients. + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-create-realm[Create a SAML realm]. +-- + +. Add attribute mappings. ++ +-- +When a user connects to {kib} through the identity provider, the IdP supplies a +SAML assertion that includes attributes for the user. You can configure the SAML +realm to map these attributes to properties on the authenticated user. + +The recommended steps for configuring these SAML attributes are as follows: +-- +.. Consult your IdP to see what user attributes it can provide. This varies +greatly between providers, but you should be able to obtain a list from the +documentation or from your local admin. + +.. Read through the list of user properties that {es} supports and decide which +of them are useful to you and can be provided by your IdP. At a minimum, the +`principal` attribute is required. The `groups` attribute is recommended. + +.. Configure your IdP to release those attributes to your {kib} SAML service +provider. ++ +-- +This process varies by provider - some provide a user interface for this, while +others might require that you edit configuration files. Usually the IdP (or your +local administrator) have suggestions about what URI to use for each attribute. +You can simply accept those suggestions, as the {es} service is entirely +configurable and does not require that any specific URIs are used. +-- + +.. Configure the SAML realm to associate the {es} user properties to the URIs +that you configured in your IdP. ++ +-- +For example, add the following settings to the `elasticsearch.yml` configuration +file: + +[source, yaml] +------------------------------------------------------------ +xpack.security.authc.realms.saml1: + ... + attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1" + attributes.groups: "urn:oid:1.3.6.1.4.1.5923.1.5.1." +------------------------------------------------------------ + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-attribute-mapping[Attribute mapping]. +-- + +. (Optional) Configure logout services. ++ +-- +The SAML protocol supports the concept of Single Logout (SLO). The level of +support for SLO varies between identity providers. + +For more information, see +{stack-ov}/saml-guide-authentication.html#saml-logout[SAML logout]. +-- + +. (Optional) Configure encryption and signing. ++ +-- +The {stack} supports generating signed SAML messages (for authentication and/or +logout), verifying signed SAML messages from the IdP (for both authentication +and logout), and processing encrypted content. + +You can configure {es} for signing, encryption, or both, with the same or +separate keys. For more information, see +{stack-ov}/saml-guide-authentication.html#saml-enc-sign[Encryption and signing]. +-- + +. 
(Optional) Generate service provider metadata. ++ +-- +There are some extra configuration steps that are specific to each identity +provider. If your identity provider can import SP metadata, some of those steps +can be automated or expedited. You can generate SP metadata for the {stack} by +using the <>. +-- + +. Configure role mappings. ++ +-- +When a user authenticates using SAML, they are identified to the {stack}, +but this does not automatically grant them access to perform any actions or +access any data. + +Your SAML users cannot do anything until they are mapped to {security} +roles. See {stack-ov}/saml-role-mapping.html[Configuring role mappings]. +-- + +. {stack-ov}/saml-kibana.html[Configure {kib} to use SAML SSO]. + diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index 740f51c877ded..a57cfaec84c43 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -22,6 +22,7 @@ the primary (or sole) authentication method for users of that {kib} instance. Once you enable SAML authentication in {kib} it will affect all users who try to login. The <> section provides more detail about how this works. +[[saml-guide-idp]] === The identity provider The Elastic Stack supports the SAML 2.0 _Web Browser SSO_ and the SAML @@ -70,6 +71,7 @@ For `` messages, the message itself must be signed, and the signature should be provided as a URL parameter, as required by the HTTP-Redirect binding. +[[saml-guide-authentication]] === Configure {es} for SAML authentication There are five configuration steps to enable SAML authentication in {es}: diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index de3895d34b000..d8ef6c2809b34 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -76,6 +76,7 @@ user API. ** <>. ** <>. ** <>. +** <>. . Set up roles and users to control access to {es}. For example, to grant _John Doe_ full access to all indices that match @@ -140,5 +141,6 @@ include::authentication/configuring-file-realm.asciidoc[] include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] +include::authentication/configuring-saml-realm.asciidoc[] include::{xes-repo-dir}/settings/security-settings.asciidoc[] include::{xes-repo-dir}/settings/audit-settings.asciidoc[] From 1918a3023754985b02743c1fb3a16c04ae0a732e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 22 May 2018 13:11:48 -0400 Subject: [PATCH 10/22] Upgrade to Lucene-7.4.0-snapshot-cc2ee23050 (#30778) The new snapshot includes LUCENE-8324 which fixes a missing checkpoint after a fully deleted segment is dropped on flush. This snapshot should resolve the failed tests in the CorruptedFileIT suite.
Closes #30741 Closes #30577 --- buildSrc/version.properties | 2 +- .../lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...ne-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...ne-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...ne-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...ne-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...ene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...ene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...ene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...ene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...cene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...cene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...ucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...ucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + ...lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - ...lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../java/org/elasticsearch/index/store/CorruptedFileIT.java | 6 ------ .../indices/analysis/AnalysisFactoryTestCase.java | 4 ++-- .../licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + .../licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 | 1 - .../licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 | 1 + 53 files changed, 28 insertions(+), 34 deletions(-) delete mode 100644 
modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 
server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a547982e3b613..c98e265792b5b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-59f2b7aec2 +lucene = 7.4.0-snapshot-cc2ee23050 # optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 702782e1c5ed7..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a3dba337d06e1f5930cb7ae638c1655b99ce0cb7 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..8222106897b18 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +1e28b448387ec05d655f8c81ee54e13ff2975a4d \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index f99b0177de590..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -473a7f4d955f132bb498482648266653f8da85bd \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..781b814c99e45 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +452c9a9f86b79b9b3eaa7d6aa782e189d5bcfe8f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file 
mode 100644 index 08269eed6360f..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5a72b9a790e2552248c8bbb36af47c4c399ba27 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..baba08978587f --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +48c76a922bdfc7f50b1b6fe22e9456c555f3f990 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 325fe16120428..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -14f680ab9b886c7c5224ff682a7fa70b6df44a05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..da19e1c3857a5 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +4db5777df468b0867ff6539c9ab687e0ed6cab41 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 9e88119ed1d16..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e033c68c9ec1ba9cd8439758adf7eb5fee22acef \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..148b5425d64b1 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +0e09e6b011ab2b1a0e3e0e1df2ab2a91dca8ba23 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 74721c857571c..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08df0a5029f11c109b22064dec78c05dfa25f9e3 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..bce84d16a9a3d --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +ceefa0f9789ab9ea5c8ab9f67ed7a601a3ae6aa9 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 
b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 1c257797c08e2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a9d1819b2b13f134f6a605ab5a59ce3c602c0460 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..762c56f77001f --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +b013adc183e52a74795ad3d3032f4d0f9db30b73 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 117ac05c91fe1..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47bc91ccb0cdf0c1c404646ffe0d5fd6b020a4ab \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..7631bea25691f --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +95300f29418f60e57e022d934d3462be9e1e2225 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 14f5fcb381f1c..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b70d03784d06a643e096fae4d959200aa246ba16 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..1c471a77d80c4 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +96ab108569c77932ecb17c45421affece207df5c \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 47afb59e45eb7..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d660a63ac0f7ab2772a45ae518518472bf620620 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4c8842872abcd --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +72d09ca50979f716a57f53f2de33d55023a166ec \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 
50392f59374a8..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4aecfc6a550d3 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 27d7aaab2f589..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9eaae9dcd4ec88227475cb81d3be9afa767f1b22 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..948aacf662f5e --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +2b2ea6bfe6fa159bbf205bf7f7fa2ed2c22bbffc \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 176c3a86afe7f..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd15f0008742c84899d678cb0cecda06d0a6d63e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..30a960c5a8047 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +423e4fff9276101d845d6073dc6cd27504def207 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0bfe9cfb79aff..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5ce38b8610a7f402f2da3b0e408e508151d979c5 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..fb3cd72c75569 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +27561038da2edcae3ecc3a08b0a52824966af87a \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index c1a0127e2ce73..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -53819f03a07050a4af28361d64395c86f2cea008 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..cd989836ab24f --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +d7d422159f705261784d121e24877119d9c95083 \ No newline at 
end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 615a0dec0c0d4..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cdc0e2b65d146ed11f4d2507109e530d59ff33d \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..c4d8ad61c7396 --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +fc09508fde6ba87f241d7e3148d9e310c0db9cb9 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 12f5eff262e9c..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e56090463703112ad64ad457d18bae9a5b2966b8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..0cb51736803cd --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +201fdf3432ff3fef0f48c38c2c0f482c144f6868 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a787a00541a54..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9faf974b77058e44a6d35e956db4f5fb67389dfa \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..ecd6440ba642a --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +917df8c8d08952a012a34050b183b6204ae7081b \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 7d95cd6b3b6e3..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b852b1fe70ef70736b2b1a9ad57eb93cbaed0423 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..3e65eaeef91b3 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +caff84fa66cb0376835c39f3d4ca7dfd2177d8f4 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index ac0598b3f0c49..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2fa99ec7140fcf35db16ac1feb78ef142750d39 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 new file 
mode 100644 index 0000000000000..c86854b16c308 --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e1bce61a9d9129a8d0fdd3127a84665d29f53eb0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index a2537dbdde529..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c9963f60d3a0924b877a6f910650c5f2384822a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..144984a3869b0 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +3a2e4373d79fda968a078971efa2cb8ec9ff65b0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 6844bcd13b278..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f33ba54da5e0e125f4c5ef7dd800dd6185e4f61 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..fd19f4ad8114a --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +7f14927e5c3c1c85c4c5b3681c28c5e36f241dda \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 0343db2d94485..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bb3c18c987395dae6fe63744f5a50fd367ea5a74 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..ba405960dbeb7 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +6e708a38c957a655e0cfedb06a1b9aa892929db0 \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index 108b41d54a08e..23533217ba1cf 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -470,8 +470,6 @@ protected void sendRequest(Connection connection, long requestId, String action, * TODO once checksum verification on snapshotting is implemented this test needs to be fixed or split into several * parts... We should also corrupt files on the actual snapshot and check that we don't restore the corrupted shard. 
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30577") - @TestLogging("org.elasticsearch.repositories:TRACE,org.elasticsearch.snapshots:TRACE,org.elasticsearch.index.engine:DEBUG") public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, InterruptedException, IOException { int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); @@ -520,10 +518,6 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I break; } } - if (snapshotState != SnapshotState.PARTIAL) { - logger.info("--> listing shard files for investigation"); - files.forEach(f -> logger.info("path: {}", f.toAbsolutePath())); - } assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); assertThat(corruptedFile, notNullValue()); } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 0396b8ac78820..f26c44e05f506 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -217,9 +217,9 @@ private static String toCamelCase(String s) { // should we expose it, or maybe think about higher level integration of the // fake term frequency feature (LUCENE-7854) .put("delimitedtermfrequency", Void.class) - // LUCENE-8273: ConditionalTokenFilter allows analysis chains to skip + // LUCENE-8273: ProtectedTermFilterFactory allows analysis chains to skip // particular token filters based on the attributes of the current token. - .put("termexclusion", Void.class) + .put("protectedterm", Void.class) .immutableMap(); diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374a8..0000000000000 --- a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4aecfc6a550d3 --- /dev/null +++ b/x-pack/plugin/sql/jdbc/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 deleted file mode 100644 index 50392f59374a8..0000000000000 --- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-59f2b7aec2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf8f9e8284a54af18545574cb4a530da0deb968a \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 new file mode 100644 index 0000000000000..4aecfc6a550d3 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-cc2ee23050.jar.sha1 @@ -0,0 +1 @@ +e118e4d05070378516b9055184b74498ba528dee \ No newline at end of file From 
f7b5986682a5b439589897dbcf1997fb3eafc066 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 22 May 2018 20:22:42 +0200 Subject: [PATCH 11/22] [Docs] Fix script-fields snippet execution (#30693) Currently the first snippet in the documentation test in script-fields.asciidoc isn't executed, although it has the CONSOLE annotation. Adding a test setup annotation to it seems to fix the problem. --- docs/reference/search/request/script-fields.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index 55623faf2684c..da5868ea7d65e 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -15,13 +15,13 @@ GET /_search "test1" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * 2" + "source": "doc['price'].value * 2" } }, "test2" : { "script" : { "lang": "painless", - "source": "doc['my_field_name'].value * params.factor", + "source": "doc['price'].value * params.factor", "params" : { "factor" : 2.0 } @@ -31,7 +31,7 @@ GET /_search } -------------------------------------------------- // CONSOLE - +// TEST[setup:sales] Script fields can work on fields that are not stored (`my_field_name` in the above case), and allow to return custom values to be returned (the From 0d37ac4e8c8e25ce963907bb903e664be700b153 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 22 May 2018 20:31:36 +0200 Subject: [PATCH 12/22] [TEST] remove endless wait in RestClientTests (#30776) This commit adds a max wait timeout of one second to all the latch.await calls made in RestClientTests. It also makes clearer that the `onSuccess` listener method will never be called given that the underlying http client is mocked and makes sure that `latch.countDown` is always called --- .../elasticsearch/client/RestClientTests.java | 80 ++++++++++++------- 1 file changed, 50 insertions(+), 30 deletions(-) diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ea124828e45eb..92d90bff71073 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -27,11 +27,13 @@ import java.net.URI; import java.util.Collections; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -57,17 +59,20 @@ public void testPerformAsyncWithUnsupportedMethod() throws Exception { restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", 
exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -81,17 +86,20 @@ public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of unsupported method"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(UnsupportedOperationException.class)); + assertEquals("http method not supported: unsupported", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -105,17 +113,20 @@ public void testPerformOldStyleAsyncWithNullParams() throws Exception { restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null parameters"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("parameters cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -129,18 +140,21 @@ public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { ResponseListener listener = new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of null headers"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(NullPointerException.class)); + assertEquals("header cannot be null", exception.getMessage()); + } finally { + latch.countDown(); + } } }; restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -150,17 +164,20 @@ public void testPerformAsyncWithWrongEndpoint() throws Exception { restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong 
endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } @@ -174,17 +191,20 @@ public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { @Override public void onSuccess(Response response) { - fail("should have failed because of wrong endpoint"); + throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); } @Override public void onFailure(Exception exception) { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - latch.countDown(); + try { + assertThat(exception, instanceOf(IllegalArgumentException.class)); + assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); + } finally { + latch.countDown(); + } } }); - latch.await(); + assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); } } From a17d6cab9853ae8a87a52210cdd84cfaec8b0ad2 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 22 May 2018 20:32:30 +0200 Subject: [PATCH 13/22] Replace Request#setHeaders with addHeader (#30588) Adding headers rather than setting them all at once seems more user-friendly and we already do it in a similar way for parameters (see Request#addParameter). 
--- .../client/RestHighLevelClient.java | 13 ++-- .../CustomRestHighLevelClientTests.java | 4 +- .../org/elasticsearch/client/Request.java | 64 +++++++++++++------ .../org/elasticsearch/client/RestClient.java | 34 +++++++--- .../elasticsearch/client/RequestTests.java | 58 ++++++++--------- .../RestClientSingleHostIntegTests.java | 5 +- .../client/RestClientSingleHostTests.java | 12 ++-- .../elasticsearch/client/RestClientTests.java | 2 +- .../RestClientDocumentation.java | 9 +-- .../client/RestClientTestUtil.java | 2 +- docs/java-rest/low-level/usage.asciidoc | 2 +- .../http/ContextAndHeaderTransportIT.java | 5 +- .../org/elasticsearch/http/CorsNotSetIT.java | 4 +- .../org/elasticsearch/http/CorsRegexIT.java | 53 +++++++-------- .../elasticsearch/http/HttpCompressionIT.java | 3 +- .../org/elasticsearch/http/NoHandlerIT.java | 3 +- .../http/ResponseHeaderPluginIT.java | 5 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 3 +- .../qa/sql/security/RestSqlSecurityIT.java | 7 +- .../xpack/qa/sql/rest/RestSqlTestCase.java | 12 ++-- 20 files changed, 166 insertions(+), 134 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5dbf2709d9988..68e32abb69dc0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -26,8 +26,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; -import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; @@ -592,7 +590,7 @@ protected final Resp performRequest(Req reques throw validationException; } Request req = requestConverter.apply(request); - req.setHeaders(headers); + addHeaders(req, headers); Response response; try { response = client.performRequest(req); @@ -642,12 +640,19 @@ protected final void performRequestAsync(Req r listener.onFailure(e); return; } - req.setHeaders(headers); + addHeaders(req, headers); ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores); client.performRequestAsync(req, responseListener); } + private static void addHeaders(Request request, Header... 
headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + request.addHeader(header.getName(), header.getValue()); + } + } + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index 617b35c4d40f3..0bd6ecef8fb5c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -73,12 +73,12 @@ public void initClients() throws IOException { final RestClient restClient = mock(RestClient.class); restHighLevelClient = new CustomRestClient(restClient); - doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0])) + doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next())) .when(restClient) .performRequest(any(Request.class)); doAnswer(inv -> mockPerformRequestAsync( - ((Request) inv.getArguments()[0]).getHeaders()[0], + ((Request) inv.getArguments()[0]).getHeaders().iterator().next(), (ResponseListener) inv.getArguments()[1])) .when(restClient) .performRequestAsync(any(Request.class), any(ResponseListener.class)); diff --git a/client/rest/src/main/java/org/elasticsearch/client/Request.java b/client/rest/src/main/java/org/elasticsearch/client/Request.java index 92610239cae92..59b82e5bf9649 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Request.java @@ -19,14 +19,17 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -36,13 +39,12 @@ * HTTP Request to Elasticsearch. */ public final class Request { - private static final Header[] NO_HEADERS = new Header[0]; private final String method; private final String endpoint; private final Map parameters = new HashMap<>(); + private final List
headers = new ArrayList<>(); private HttpEntity entity; - private Header[] headers = NO_HEADERS; private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory = HttpAsyncResponseConsumerFactory.DEFAULT; @@ -125,21 +127,19 @@ public HttpEntity getEntity() { } /** - * Set the headers to attach to the request. + * Add the provided header to the request. */ - public void setHeaders(Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - } - this.headers = headers; + public void addHeader(String name, String value) { + Objects.requireNonNull(name, "header name cannot be null"); + Objects.requireNonNull(value, "header value cannot be null"); + this.headers.add(new ReqHeader(name, value)); } /** * Headers to attach to the request. */ - public Header[] getHeaders() { - return headers; + List
getHeaders() { + return Collections.unmodifiableList(headers); } /** @@ -175,13 +175,13 @@ public String toString() { if (entity != null) { b.append(", entity=").append(entity); } - if (headers.length > 0) { + if (headers.size() > 0) { b.append(", headers="); - for (int h = 0; h < headers.length; h++) { + for (int h = 0; h < headers.size(); h++) { if (h != 0) { b.append(','); } - b.append(headers[h].toString()); + b.append(headers.get(h).toString()); } } if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) { @@ -204,12 +204,40 @@ public boolean equals(Object obj) { && endpoint.equals(other.endpoint) && parameters.equals(other.parameters) && Objects.equals(entity, other.entity) - && Arrays.equals(headers, other.headers) + && headers.equals(other.headers) && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory); } @Override public int hashCode() { - return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory); + return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory); + } + + /** + * Custom implementation of {@link BasicHeader} that overrides equals and hashCode. + */ + static final class ReqHeader extends BasicHeader { + + ReqHeader(String name, String value) { + super(name, value); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other instanceof ReqHeader) { + Header otherHeader = (Header) other; + return Objects.equals(getName(), otherHeader.getName()) && + Objects.equals(getValue(), otherHeader.getValue()); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(getName(), getValue()); + } } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 05fa4d536b3b6..33171e18e743d 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -215,7 +215,7 @@ public void performRequestAsync(Request request, ResponseListener responseListen @Deprecated public Response performRequest(String method, String endpoint, Header... headers) throws IOException { Request request = new Request(method, endpoint); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -237,7 +237,7 @@ public Response performRequest(String method, String endpoint, Header... headers public Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { Request request = new Request(method, endpoint); addParameters(request, params); - request.setHeaders(headers); + addHeaders(request, headers); return performRequest(request); } @@ -264,7 +264,7 @@ public Response performRequest(String method, String endpoint, Map requestHeaders) { // request headers override default headers, so we don't add default headers if they exist as request headers - final Set requestNames = new HashSet<>(requestHeaders.length); + final Set requestNames = new HashSet<>(requestHeaders.size()); for (Header requestHeader : requestHeaders) { httpRequest.addHeader(requestHeader); requestNames.add(requestHeader.getName()); @@ -877,10 +877,24 @@ private static class HostTuple { } } + /** + * Add all headers from the provided varargs argument to a {@link Request}. 
This only exists + * to support methods that exist for backwards compatibility. + */ + @Deprecated + private static void addHeaders(Request request, Header... headers) { + Objects.requireNonNull(headers, "headers cannot be null"); + for (Header header : headers) { + Objects.requireNonNull(header, "header cannot be null"); + request.addHeader(header.getName(), header.getValue()); + } + } + /** * Add all parameters from a map to a {@link Request}. This only exists * to support methods that exist for backwards compatibility. */ + @Deprecated private static void addParameters(Request request, Map parameters) { Objects.requireNonNull(parameters, "parameters cannot be null"); for (Map.Entry entry : parameters.entrySet()) { diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index 6625c389c6be8..29bbf23a1f20e 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -19,21 +19,21 @@ package org.elasticsearch.client; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; -import static org.junit.Assert.assertArrayEquals; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; @@ -127,31 +127,33 @@ public void testSetJsonEntity() throws IOException { assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); } - public void testSetHeaders() { + public void testAddHeader() { final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"}); final String endpoint = randomAsciiLettersOfLengthBetween(1, 10); Request request = new Request(method, endpoint); try { - request.setHeaders((Header[]) null); + request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10)); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("headers cannot be null", e.getMessage()); + assertEquals("header name cannot be null", e.getMessage()); } try { - request.setHeaders(new Header [] {null}); + request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null); fail("expected failure"); } catch (NullPointerException e) { - assertEquals("header cannot be null", e.getMessage()); + assertEquals("header value cannot be null", e.getMessage()); } - Header[] headers = new Header[between(0, 5)]; - for (int i = 0; i < headers.length; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + int numHeaders = between(0, 5); + List
headers = new ArrayList<>(); + for (int i = 0; i < numHeaders; i++) { + Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)); + headers.add(header); + request.addHeader(header.getName(), header.getValue()); } - request.setHeaders(headers); - assertArrayEquals(headers, request.getHeaders()); + assertEquals(headers, new ArrayList<>(request.getHeaders())); } public void testEqualsAndHashCode() { @@ -168,7 +170,7 @@ public void testEqualsAndHashCode() { assertNotEquals(mutant, request); } - private Request randomRequest() { + private static Request randomRequest() { Request request = new Request( randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), randomAsciiAlphanumOfLength(5)); @@ -192,11 +194,9 @@ private Request randomRequest() { if (randomBoolean()) { int headerCount = between(1, 5); - Header[] headers = new Header[headerCount]; for (int i = 0; i < headerCount; i++) { - headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); } - request.setHeaders(headers); } if (randomBoolean()) { @@ -206,13 +206,13 @@ private Request randomRequest() { return request; } - private Request copy(Request request) { + private static Request copy(Request request) { Request copy = new Request(request.getMethod(), request.getEndpoint()); copyMutables(request, copy); return copy; } - private Request mutate(Request request) { + private static Request mutate(Request request) { if (randomBoolean()) { // Mutate request or method but keep everything else constant Request mutant = randomBoolean() @@ -231,11 +231,7 @@ private Request mutate(Request request) { mutant.setJsonEntity("mutant"); // randomRequest can't produce this value return mutant; case 2: - if (mutant.getHeaders().length > 0) { - mutant.setHeaders(new Header[0]); - } else { - mutant.setHeaders(new BasicHeader("extra", "m")); - } + mutant.addHeader("extra", "m"); return mutant; case 3: mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); @@ -245,12 +241,14 @@ private Request mutate(Request request) { } } - private void copyMutables(Request from, Request to) { + private static void copyMutables(Request from, Request to) { for (Map.Entry param : from.getParameters().entrySet()) { to.addParameter(param.getKey(), param.getValue()); } to.setEntity(from.getEntity()); - to.setHeaders(from.getHeaders()); + for (Header header : from.getHeaders()) { + to.addHeader(header.getName(), header.getValue()); + } to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 35cac627bbe6a..a3d0196dab9a8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -29,7 +29,6 @@ import org.apache.http.auth.AuthScope; import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.TargetAuthenticationStrategy; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; @@ -379,7 +378,9 @@ private Response 
bodyTest(RestClient restClient, String method, int statusCode, String requestBody = "{ \"field\": \"value\" }"; Request request = new Request(method, "/" + statusCode); request.setJsonEntity(requestBody); - request.setHeaders(headers); + for (Header header : headers) { + request.addHeader(header.getName(), header.getValue()); + } Response esResponse; try { esResponse = restClient.performRequest(request); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 714d2e57e6d20..3811b60023b43 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -312,7 +312,7 @@ public void testBody() throws IOException { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeaders()}. */ @Deprecated public void tesPerformRequestOldStyleNullHeaders() throws IOException { @@ -333,7 +333,7 @@ public void tesPerformRequestOldStyleNullHeaders() throws IOException { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. */ @Deprecated public void testPerformRequestOldStyleWithNullParams() throws IOException { @@ -362,7 +362,9 @@ public void testHeaders() throws IOException { final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Request request = new Request(method, "/" + statusCode); - request.setHeaders(requestHeaders); + for (Header requestHeader : requestHeaders) { + request.addHeader(requestHeader.getName(), requestHeader.getValue()); + } Response esResponse; try { esResponse = restClient.performRequest(request); @@ -436,9 +438,9 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { final Set uniqueNames = new HashSet<>(); if (randomBoolean()) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); - request.setHeaders(headers); for (Header header : headers) { - expectedRequest.addHeader(header); + request.addHeader(header.getName(), header.getValue()); + expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue())); uniqueNames.add(header.getName()); } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 92d90bff71073..15fa5c0f99596 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -131,7 +131,7 @@ public void onFailure(Exception exception) { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}. 
*/ @Deprecated public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 5ee97399b34e6..f3ce112fea1a1 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -27,9 +27,7 @@ import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.client.CredentialsProvider; import org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.BasicHttpEntity; import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.reactor.IOReactorConfig; @@ -52,8 +50,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.security.KeyStore; -import java.util.Collections; -import java.util.Map; import java.util.concurrent.CountDownLatch; /** @@ -176,9 +172,8 @@ public void onFailure(Exception exception) { request.setJsonEntity("{\"json\":\"text\"}"); //end::rest-client-body-shorter //tag::rest-client-headers - request.setHeaders( - new BasicHeader("Accept", "text/plain"), - new BasicHeader("Cache-Control", "no-cache")); + request.addHeader("Accept", "text/plain"); + request.addHeader("Cache-Control", "no-cache"); //end::rest-client-headers //tag::rest-client-response-consumer request.setHttpAsyncResponseConsumerFactory( diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java index a0a6641abbc5f..07bae6c17fdd2 100644 --- a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java @@ -100,7 +100,7 @@ static Header[] randomHeaders(Random random, final String baseName) { if (random.nextBoolean()) { headerName = headerName + i; } - headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10)); + headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10)); } return headers; } diff --git a/docs/java-rest/low-level/usage.asciidoc b/docs/java-rest/low-level/usage.asciidoc index 68367b9a64fdf..012ce418226cd 100644 --- a/docs/java-rest/low-level/usage.asciidoc +++ b/docs/java-rest/low-level/usage.asciidoc @@ -271,7 +271,7 @@ a `ContentType` of `application/json`. 
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter] -------------------------------------------------- -And you can set a list of headers to send with the request: +And you can add one or more headers to send with the request: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index bfa856e381b12..99132f0c89d5b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -222,8 +221,8 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; Request request = new Request("GET", "/" + queryIndex + "/_search"); - request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), - new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + request.addHeader(CUSTOM_HEADER, randomHeaderValue); + request.addHeader(IRRELEVANT_HEADER, randomHeaderValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index 4ab64abda453b..2d139e7955ea9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -33,7 +32,8 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index da48e51b63bbe..e79e80315501b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -53,25 +52,29 @@ 
protected Settings nodeSettings(int nodeOrdinal) { } public void testThatRegularExpressionWorksOnMatch() throws IOException { - String corsValue = "http://localhost:9200"; - Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - - corsValue = "https://localhost:9201"; - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue)); - response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + { + String corsValue = "http://localhost:9200"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + } + { + String corsValue = "https://localhost:9201"; + Request request = new Request("GET", "/"); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + Response response = getRestClient().performRequest(request); + assertResponseWithOriginHeader(response, corsValue); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); + } } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", "http://evil-host:9200"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -85,7 +88,7 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + request.addHeader("User-Agent", "Mozilla Bar"); Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -100,20 +103,20 @@ public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + request.addHeader("Access-Control-Request-Method", "GET"); Response response = getRestClient().performRequest(request); - assertResponseWithOriginheader(response, corsValue); + assertResponseWithOriginHeader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { String corsValue = "http://evil-host:9200"; Request request = new Request("OPTIONS", "/"); - request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), - new 
BasicHeader("Origin", corsValue), - new BasicHeader("Access-Control-Request-Method", "GET")); + request.addHeader("User-Agent", "Mozilla Bar"); + request.addHeader("Origin", corsValue); + request.addHeader("Access-Control-Request-Method", "GET"); try { getRestClient().performRequest(request); fail("request should have failed"); @@ -126,7 +129,7 @@ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { } } - protected static void assertResponseWithOriginheader(Response response, String expectedCorsHeader) { + private static void assertResponseWithOriginHeader(Response response, String expectedCorsHeader) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 6af08577393d9..a9a0a0c7ed945 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.test.rest.ESRestTestCase; @@ -39,7 +38,7 @@ public class HttpCompressionIT extends ESRestTestCase { public void testCompressesResponseIfRequested() throws IOException { Request request = new Request("GET", "/"); - request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + request.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING); Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index e1d55afea1b54..976ba3131151f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -47,7 +46,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); - request.setHeaders(new BasicHeader("Accept", accept)); + request.addHeader("Accept", accept); final ResponseException e = expectThrows(ResponseException.class, () -> getRestClient().performRequest(request)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index b4dbc50d52db7..ac2503f2c525c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.http; -import org.apache.http.message.BasicHeader; 
import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -26,8 +25,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import java.util.ArrayList; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import static org.hamcrest.Matchers.equalTo; @@ -62,7 +61,7 @@ public void testThatSettingHeadersWorks() throws IOException { } Request request = new Request("GET", "/_protected"); - request.setHeaders(new BasicHeader("Secret", "password")); + request.addHeader("Secret", "password"); Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index e0b501c5f25e6..30ac94e343246 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -22,7 +22,6 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -323,7 +322,7 @@ public void test() throws IOException { if (useDefaultNumberOfShards == false && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) { final Request request = new Request("PUT", "/_template/global"); - request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters())); + request.addHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters()); request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}")); adminClient().performRequest(request); } diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java index f7abb6f64f63c..bdbb75491ca87 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java @@ -5,20 +5,16 @@ */ package org.elasticsearch.xpack.qa.sql.security; -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.NotEqualMessageBuilder; -import org.elasticsearch.xpack.qa.sql.security.SqlSecurityTestCase.AuditLogAsserter; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -30,7 +26,6 @@ import java.util.HashMap; 
import java.util.List; import java.util.Map; -import java.util.TreeMap; import java.util.stream.Collectors; import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.columnInfo; @@ -182,7 +177,7 @@ private static Map runSql(@Nullable String asUser, String mode, request.addParameter("mode", mode); } if (asUser != null) { - request.setHeaders(new BasicHeader("es-security-runas-user", asUser)); + request.addHeader("es-security-runas-user", asUser); } request.setEntity(entity); return toMap(client().performRequest(request)); diff --git a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java index e0cf0efac472e..80dd09d3c47a8 100644 --- a/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java +++ b/x-pack/qa/sql/src/main/java/org/elasticsearch/xpack/qa/sql/rest/RestSqlTestCase.java @@ -6,12 +6,9 @@ package org.elasticsearch.xpack.qa.sql.rest; import com.fasterxml.jackson.core.io.JsonStringEncoder; - -import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -321,10 +318,9 @@ private Map runSql(String mode, HttpEntity sql, String suffix) t if (false == mode.isEmpty()) { request.addParameter("mode", mode); // JDBC or PLAIN mode } - request.setHeaders(randomFrom( - new Header[] {}, - new Header[] {new BasicHeader("Accept", "*/*")}, - new Header[] {new BasicHeader("Accpet", "application/json")})); + if (randomBoolean()) { + request.addHeader("Accept", randomFrom("*/*", "application/json")); + } request.setEntity(sql); Response response = client().performRequest(request); try (InputStream content = response.getEntity().getContent()) { @@ -540,7 +536,7 @@ private Tuple runSqlAsText(String suffix, HttpEntity entity, Str Request request = new Request("POST", "/_xpack/sql" + suffix); request.addParameter("error_trace", "true"); request.setEntity(entity); - request.setHeaders(new BasicHeader("Accept", accept)); + request.addHeader("Accept", accept); Response response = client().performRequest(request); return new Tuple<>( Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)), From 2984734197223003dc80ed1ac4e8366f8d49ed1c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 22 May 2018 14:33:16 -0400 Subject: [PATCH 14/22] Simplify number of shards setting (#30783) This is code that was leftover from the move to one shard by default. Here in index metadata we were preserving the default number of shards settings independently of the area of code where we set this value on an index that does not explicitly have an number of shards setting. This took into consideration the es.index.max_number_of_shards system property, and was used in search requests to set the default maximum number of concurrent shard requests. We set the default there based on the default number of shards so that in a one-node case a search request could concurrently hit all shards on an index with the defaults. Now that we default to one shard, we expect fewer shards in clusters and this adjustment of the node count as the max number of concurrent shard requests is no longer needed. 
This commit then changes the default number of shards settings to be consistent with the value used when an index is created, and removes the now unneeded adjustment in search requests. --- .../action/search/TransportSearchAction.java | 13 ++++++------- .../cluster/metadata/IndexMetaData.java | 3 +-- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 6b39af478f432..46207b94c3af4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -341,13 +341,12 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea return searchTransportService.getConnection(clusterName, discoveryNode); }; if (searchRequest.isMaxConcurrentShardRequestsSet() == false) { - // we try to set a default of max concurrent shard requests based on - // the node count but upper-bound it by 256 by default to keep it sane. A single - // search request that fans out lots of shards should hit a cluster too hard while 256 is already a lot. - // we multiply it by the default number of shards such that a single request in a cluster of 1 would hit all shards of a - // default index. - searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount - * IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getDefault(Settings.EMPTY))); + /* + * We try to set a default of max concurrent shard requests based on the node count but upper-bound it by 256 by default to keep + * it sane. A single search request that fans out to lots of shards should not hit a cluster too hard while 256 is already a + * lot. + */ + searchRequest.setMaxConcurrentShardRequests(Math.min(256, nodeCount)); } boolean preFilterSearchShards = shouldPreFilterSearchShards(searchRequest, shardIterators); searchAsyncAction(task, searchRequest, shardIterators, timeProvider, connectionLookup, clusterState.version(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 7af2ec2d237d2..db45ce6c9e353 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -181,8 +181,7 @@ static Setting buildNumberOfShardsSetting() { if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); } - return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, Math.min(5, maxNumShards), 1, maxNumShards, - Property.IndexScope, Property.Final); + return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 1, 1, maxNumShards, Property.IndexScope, Property.Final); } public static final String INDEX_SETTING_PREFIX = "index."; From 0fc22de33695a7b3da26be29a93208a413b3f472 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 22 May 2018 14:34:29 -0400 Subject: [PATCH 15/22] Reduce CLI scripts to one-liners on Windows (#30772) This commit reduces the Windows CLI scripts to one-liners by moving all of the redundant logic to an elasticsearch-cli script. This commit is only the Windows side, a previous commit covered the Linux side. 
--- distribution/src/bin/elasticsearch-cli.bat | 22 +++++++++++++++++++ .../src/bin/elasticsearch-keystore.bat | 15 ++++--------- distribution/src/bin/elasticsearch-plugin.bat | 13 +++-------- .../src/bin/elasticsearch-translog.bat | 13 +++-------- .../src/main/bin/elasticsearch-certgen.bat | 16 ++++---------- .../src/main/bin/elasticsearch-certutil.bat | 16 ++++---------- .../src/main/bin/elasticsearch-migrate.bat | 16 ++++---------- .../main/bin/elasticsearch-saml-metadata.bat | 16 ++++---------- .../bin/elasticsearch-setup-passwords.bat | 16 ++++---------- .../src/main/bin/elasticsearch-syskeygen.bat | 16 ++++---------- .../src/main/bin/elasticsearch-users.bat | 16 ++++---------- .../src/main/bin/x-pack-security-env.bat | 2 -- .../src/main/bin/elasticsearch-croneval.bat | 16 ++++---------- .../src/main/bin/x-pack-watcher-env.bat | 2 -- 14 files changed, 64 insertions(+), 131 deletions(-) create mode 100644 distribution/src/bin/elasticsearch-cli.bat diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat new file mode 100644 index 0000000000000..efda5f653ef31 --- /dev/null +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -0,0 +1,22 @@ +call "%~dp0elasticsearch-env.bat" || exit /b 1 + +if defined ES_ADDITIONAL_SOURCES ( + for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do ( + call %~dp0%%a + ) +) + +for /f "tokens=1*" %%a in ("%*") do ( + set main_class=%%a + set arguments=%%b +) + +%JAVA% ^ + %ES_JAVA_OPTS% ^ + -Des.path.home="%ES_HOME%" ^ + -Des.path.conf="%ES_PATH_CONF%" ^ + -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ + -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ + -cp "%ES_CLASSPATH%" ^ + %main_class% ^ + %arguments% diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 1d6616983d8cc..9bd72a65745a9 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ - org.elasticsearch.common.settings.KeyStoreCli ^ - %* +call "%~dp0elasticsearch-cli.bat" ^ + org.elasticsearch.common.settings.KeyStoreCli ^ + %* ^ + || exit /b 1 endlocal endlocal diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index b3b94a31863f1..d46ef295d085b 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.plugins.PluginCli ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 492c1f0831263..37d96bbed6c4e 100644 --- a/distribution/src/bin/elasticsearch-translog.bat +++ b/distribution/src/bin/elasticsearch-translog.bat @@ -3,17 +3,10 @@ setlocal enabledelayedexpansion 
setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.index.translog.TranslogToolCli ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat index d44ca227c07fd..8c8a0c69f5626 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat index 4426fb87d3ba6..f898f885ce0a3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.core.ssl.CertificateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat index 79090b6490790..f9486979e6bc3 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. 
setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat index 9e5625d0b912e..4ddb8da3ff143 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat index b449ca09a6c30..f380e5f55271f 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat index 3ee9dcb3ba9cb..1eff4aad8251e 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. 
setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat index b32b9398f9971..7f7347d706ff5 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-security-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.security.authc.file.tool.UsersTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat index 035f1c965ffb6..d003412fc08d9 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat @@ -2,6 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. -call "%~dp0x-pack-env.bat" || exit /b 1 - set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-security/* diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 7fd983c9ba5fe..37ca14dd094cc 100644 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -7,19 +7,11 @@ rem you may not use this file except in compliance with the Elastic License. 
setlocal enabledelayedexpansion setlocal enableextensions -call "%~dp0elasticsearch-env.bat" || exit /b 1 - -call "%~dp0x-pack-watcher-env.bat" || exit /b 1 - -%JAVA% ^ - %ES_JAVA_OPTS% ^ - -Des.path.home="%ES_HOME%" ^ - -Des.path.conf="%ES_PATH_CONF%" ^ - -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ - -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ - -cp "%ES_CLASSPATH%" ^ +set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-watcher-env +call "%~dp0elasticsearch-cli.bat" ^ org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool ^ - %* + %* ^ + || exit /b 1 endlocal endlocal diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat index 010c154eb5a39..4c7f762dca26c 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat @@ -2,6 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. -call "%~dp0x-pack-env.bat" || exit /b 1 - set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-watcher/* From a8cea90e1084983b2d045edae84187fe8708399d Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 22 May 2018 14:55:20 -0500 Subject: [PATCH 16/22] Modify state of VerifyRepositoryResponse for bwc (#30762) The VerifyRepositoryResponse class holds a DiscoveryNode[], but the nodes themselves are not serialized to a REST API consumer. Since we do not want to put all of a DiscoveryNode over the wire, be it REST or Transport since its unused, this change introduces a BWC compatible change in ser/deser of the Response. Anything 6.4 and above will read/write a NodeView, and anything prior will read/write a DiscoveryNode. Further changes to 7.0 will be introduced to remove the BWC shim and only read/write NodeView, and hold a List as the VerifyRepositoryResponse internal state. 
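As an aside, the usual shape of such a wire-compatibility gate is sketched below; this is illustrative only: the gating expression and the 6.4 version constant are assumptions based on the description above, not a copy of the patch.

    void writeNodes(StreamOutput out, ClusterName clusterName,
                    List<VerifyRepositoryResponse.NodeView> nodeViews,
                    List<DiscoveryNode> discoveryNodes) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeList(nodeViews);      // 6.4+ peers exchange the lightweight NodeView
        } else {
            clusterName.writeTo(out);      // older peers expect the cluster name plus full DiscoveryNodes
            out.writeList(discoveryNodes);
        }
    }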
--- .../verify/VerifyRepositoryResponse.java | 138 +++++++++++++++--- .../repositories/RepositoryBlocksIT.java | 2 +- .../cluster/snapshots/SnapshotBlocksIT.java | 2 +- .../snapshots/RepositoriesIT.java | 2 +- 4 files changed, 118 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index 27612a3dab24b..c3fb2d58bebf3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -19,23 +19,112 @@ package org.elasticsearch.action.admin.cluster.repositories.verify; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; /** - * Unregister repository response + * Verify repository response */ public class VerifyRepositoryResponse extends ActionResponse implements ToXContentObject { - private DiscoveryNode[] nodes; + static final String NODES = "nodes"; + static final String NAME = "name"; + + public static class NodeView implements Writeable, ToXContentObject { + private static final ObjectParser.NamedObjectParser PARSER; + static { + ObjectParser internalParser = new ObjectParser<>(NODES); + internalParser.declareString(NodeView::setName, new ParseField(NAME)); + PARSER = (p, v, name) -> internalParser.parse(p, new NodeView(name), null); + } + + final String nodeId; + String name; + + public NodeView(String nodeId) { this.nodeId = nodeId; } + + public NodeView(String nodeId, String name) { + this(nodeId); + this.name = name; + } + + public NodeView(StreamInput in) throws IOException { + this(in.readString(), in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(nodeId); + out.writeString(name); + } + + void setName(String name) { this.name = name; } + + public String getName() { return name; } + + public String getNodeId() { return nodeId; } + + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(nodeId); + { + builder.field(NAME, name); + } + builder.endObject(); + return builder; + } + + /** + * Temporary method that allows turning a {@link NodeView} into a {@link DiscoveryNode}. This representation will never be used in + * practice, because in >= 6.4 a consumer of the response will only be able to retrieve a representation of {@link NodeView} + * objects. 
+ * + * Effectively this will be used to hold the state of the object in 6.x so there is no need to have 2 backing objects that + * represent the state of the Response. In practice these will always be read by a consumer as a NodeView, but it eases the + * transition to master which will not contain any representation of a {@link DiscoveryNode}. + */ + DiscoveryNode convertToDiscoveryNode() { + return new DiscoveryNode(name, nodeId, "", "", "", new TransportAddress(TransportAddress.META_ADDRESS, 0), + Collections.emptyMap(), Collections.emptySet(), Version.CURRENT); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + NodeView other = (NodeView) obj; + return Objects.equals(nodeId, other.nodeId) && + Objects.equals(name, other.name); + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, name); + } + } + + private List nodes; private ClusterName clusterName; @@ -45,53 +134,56 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte public VerifyRepositoryResponse(ClusterName clusterName, DiscoveryNode[] nodes) { this.clusterName = clusterName; - this.nodes = nodes; + this.nodes = Arrays.asList(nodes); } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = new ClusterName(in); - nodes = new DiscoveryNode[in.readVInt()]; - for (int i=0; i n.convertToDiscoveryNode()).collect(Collectors.toList()); + } else { + clusterName = new ClusterName(in); + this.nodes = in.readList(DiscoveryNode::new); } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - clusterName.writeTo(out); - out.writeVInt(nodes.length); - for (DiscoveryNode node : nodes) { - node.writeTo(out); + if (Version.CURRENT.onOrAfter(Version.V_7_0_0_alpha1)) { + out.writeList(getNodes()); + } else { + clusterName.writeTo(out); + out.writeList(nodes); } } - public DiscoveryNode[] getNodes() { - return nodes; + public List getNodes() { + return nodes.stream().map(dn -> new NodeView(dn.getId(), dn.getName())).collect(Collectors.toList()); } public ClusterName getClusterName() { return clusterName; } - static final class Fields { - static final String NODES = "nodes"; - static final String NAME = "name"; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.startObject(Fields.NODES); - for (DiscoveryNode node : nodes) { - builder.startObject(node.getId()); - builder.field(Fields.NAME, node.getName()); + { + builder.startObject(NODES); + { + for (DiscoveryNode node : nodes) { + builder.startObject(node.getId()); + { + builder.field(NAME, node.getName()); + } + builder.endObject(); + } + } builder.endObject(); } builder.endObject(); - builder.endObject(); return builder; } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 0aa5691dc67ad..43d94f56e5af3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -67,7 +67,7 @@ public void testVerifyRepositoryWithBlocks() { try { setClusterReadOnly(true); VerifyRepositoryResponse response = 
client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); - assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index c66fa4b244f18..5ca7cb1e5066d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -73,7 +73,7 @@ protected void setUpRepository() throws Exception { logger.info("--> verify the repository"); VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); - assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> create a snapshot"); CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java index d9d06c26b7dcf..23cb579bfdc92 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -61,7 +61,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); - assertThat(verifyRepositoryResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles)); From a5d90e919fceebaba10765ef643f4ed578e16914 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 22 May 2018 17:17:34 -0400 Subject: [PATCH 17/22] QA: Add xpack tests to rolling upgrade (#30795) A rolling upgrade from oss Elasticsearch to the default distribution of Elasticsearch is significantly different than a full cluster restart to install a plugin and is again different from starting a new cluster with xpack installed. So this adds some basic tests to make sure that the rolling upgrade that enables xpack works at all. This also removes some unused imports from the tests that I modified in PR #30728. I didn't mean to leave them. 
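The key piece is the gating shown below, abridged from the new XPackIT test in this patch: both assumptions must hold, so the suite is a no-op for pure OSS upgrades and for clusters that have not finished upgrading yet.

    @Before
    public void skipIfNotXPack() {
        assumeThat("test is only supported if the distribution contains xpack",
            System.getProperty("tests.distribution"), equalTo("zip"));
        assumeThat("running this on the unupgraded cluster would change its state",
            CLUSTER_TYPE, equalTo(ClusterType.UPGRADED));
    }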
--- .../upgrades/AbstractRollingTestCase.java | 25 ---- .../elasticsearch/upgrades/RecoveryIT.java | 1 - .../org/elasticsearch/upgrades/XPackIT.java | 111 ++++++++++++++++++ 3 files changed, 111 insertions(+), 26 deletions(-) create mode 100644 qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 6f4453aa06cc9..eb5517b7acb56 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -18,33 +18,8 @@ */ package org.elasticsearch.upgrades; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.elasticsearch.Version; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Response; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.test.rest.yaml.ObjectPath; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Future; -import java.util.function.Predicate; - -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.notNullValue; public abstract class AbstractRollingTestCase extends ESRestTestCase { protected enum ClusterType { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 350636551d9ad..1351de16cf718 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java new file mode 100644 index 0000000000000..3ed98a5d1f772 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/XPackIT.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.junit.Before; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static org.hamcrest.Matchers.equalTo; +import static org.junit.Assume.assumeThat; + +/** + * Basic tests for simple xpack functionality that are only run if the + * cluster is the on the "zip" distribution. + */ +public class XPackIT extends AbstractRollingTestCase { + @Before + public void skipIfNotXPack() { + assumeThat("test is only supported if the distribution contains xpack", + System.getProperty("tests.distribution"), equalTo("zip")); + assumeThat("running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", + CLUSTER_TYPE, equalTo(ClusterType.UPGRADED)); + /* + * *Mostly* we want this for when we're upgrading from pre-6.3's + * zip distribution which doesn't contain xpack to post 6.3's zip + * distribution which *does* contain xpack. But we'll also run it + * on all upgrades for completeness's sake. + */ + } + + /** + * Test a basic feature (SQL) which doesn't require any trial license. + * Note that the test methods on this class can run in any order so we + * might have already installed a trial license. + */ + public void testBasicFeature() throws IOException { + Request bulk = new Request("POST", "/sql_test/doc/_bulk"); + bulk.setJsonEntity( + "{\"index\":{}}\n" + + "{\"f\": \"1\"}\n" + + "{\"index\":{}}\n" + + "{\"f\": \"2\"}\n"); + bulk.addParameter("refresh", "true"); + client().performRequest(bulk); + + Request sql = new Request("POST", "/_xpack/sql"); + sql.setJsonEntity("{\"query\": \"SELECT * FROM sql_test WHERE f > 1 ORDER BY f ASC\"}"); + String response = EntityUtils.toString(client().performRequest(sql).getEntity()); + assertEquals("{\"columns\":[{\"name\":\"f\",\"type\":\"text\"}],\"rows\":[[\"2\"]]}", response); + } + + /** + * Test creating a trial license and using it. This is interesting because + * our other tests test cover starting a new cluster with the default + * distribution and enabling the trial license but this test is the only + * one that can upgrade from the oss distribution to the default + * distribution with xpack and the create a trial license. We don't + * do a lot with the trial license because for the most + * part those things are tested elsewhere, off in xpack. But we do use the + * trial license a little bit to make sure that it works. 
+ */ + public void testTrialLicense() throws IOException { + Request startTrial = new Request("POST", "/_xpack/license/start_trial"); + startTrial.addParameter("acknowledge", "true"); + client().performRequest(startTrial); + + String noJobs = EntityUtils.toString( + client().performRequest(new Request("GET", "/_xpack/ml/anomaly_detectors")).getEntity()); + assertEquals("{\"count\":0,\"jobs\":[]}", noJobs); + + Request createJob = new Request("PUT", "/_xpack/ml/anomaly_detectors/test_job"); + createJob.setJsonEntity( + "{\n" + + " \"analysis_config\" : {\n" + + " \"bucket_span\": \"10m\",\n" + + " \"detectors\": [\n" + + " {\n" + + " \"function\": \"sum\",\n" + + " \"field_name\": \"total\"\n" + + " }\n" + + " ]\n" + + " },\n" + + " \"data_description\": {\n" + + " \"time_field\": \"timestamp\",\n" + + " \"time_format\": \"epoch_ms\"\n" + + " }\n" + + "}\n"); + client().performRequest(createJob); + } +} From 63a57995264cec6705d7ecf83469b2724752b99e Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 22 May 2018 17:18:05 -0600 Subject: [PATCH 18/22] Remove http pipelining from integration test case (#30788) This is related to #29500. We are removing the ability to disable http pipelining. This PR removes the references to disabling pipelining in the integration test case. --- .../single/SingleNodeDiscoveryIT.java | 1 - .../elasticsearch/test/ESIntegTestCase.java | 2 +- .../test/InternalTestCluster.java | 4 +--- .../test/test/InternalTestClusterTests.java | 23 +++++++------------ .../audit/index/IndexAuditTrailTests.java | 2 +- .../RemoteIndexAuditTrailStartingTests.java | 2 +- 6 files changed, 12 insertions(+), 22 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index fc284b9f5e80c..fdc36152cc895 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -150,7 +150,6 @@ public Path nodeConfigPath(int nodeOrdinal) { internalCluster().getClusterName(), configurationSource, 0, - false, "other", Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), Function.identity())) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 7210fadd7ead5..505a5937d290b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1829,7 +1829,7 @@ protected TestCluster buildTestCluster(Scope scope, long seed) throws IOExceptio return new InternalTestCluster(seed, createTempDir(), supportsDedicatedMasters, getAutoMinMasterNodes(), minNumDataNodes, maxNumDataNodes, InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(), - InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins, getClientWrapper()); + nodePrefix, mockPlugins, getClientWrapper()); } protected NodeConfigurationSource getNodeConfigSource() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index b945f7d84eb95..c786107361671 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -171,8 +171,6 @@ public final class InternalTestCluster extends TestCluster { static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0; static final int DEFAULT_MAX_NUM_CLIENT_NODES = 1; - static final boolean DEFAULT_ENABLE_HTTP_PIPELINING = true; - /* sorted map to make traverse order reproducible, concurrent since we do checks on it not within a sync block */ private final NavigableMap nodes = new TreeMap<>(); @@ -219,7 +217,7 @@ public final class InternalTestCluster extends TestCluster { public InternalTestCluster(long clusterSeed, Path baseDir, boolean randomlyAddDedicatedMasters, boolean autoManageMinMasterNodes, int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes, - boolean enableHttpPipelining, String nodePrefix, Collection> mockPlugins, Function clientWrapper) { + String nodePrefix, Collection> mockPlugins, Function clientWrapper) { super(clusterSeed); this.autoManageMinMasterNodes = autoManageMinMasterNodes; this.clientWrapper = clientWrapper; diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index c70708c73acbf..e25f917d69958 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -19,8 +19,6 @@ */ package org.elasticsearch.test.test; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterName; @@ -28,6 +26,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.NodeEnvironment; @@ -63,8 +62,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.not; /** @@ -86,16 +83,15 @@ public void testInitializiationIsConsistent() { String clusterName = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); NodeConfigurationSource nodeConfigurationSource = NodeConfigurationSource.EMPTY; int numClientNodes = randomIntBetween(0, 10); - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); Path baseDir = createTempDir(); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + nodePrefix, Collections.emptyList(), Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, 
numClientNodes, - enableHttpPipelining, nodePrefix, Collections.emptyList(), Function.identity()); + nodePrefix, Collections.emptyList(), Function.identity()); // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way assertClusters(cluster0, cluster1, false); @@ -211,16 +207,15 @@ public Settings transportClientSettings() { } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "foobar"; Path baseDir = createTempDir(); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), Function.identity()); InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), Function.identity()); assertClusters(cluster0, cluster1, false); long seed = randomLong(); @@ -280,12 +275,11 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); InternalTestCluster cluster = new InternalTestCluster(clusterSeed, baseDir, masterNodes, true, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, - enableHttpPipelining, nodePrefix, mockPlugins(), Function.identity()); + nodePrefix, mockPlugins(), Function.identity()); try { cluster.beforeTest(random(), 0.0); final int originalMasterCount = cluster.numMasterNodes(); @@ -390,7 +384,7 @@ public Settings transportClientSettings() { return Settings.builder() .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } - }, 0, randomBoolean(), "", mockPlugins(), Function.identity()); + }, 0, "", mockPlugins(), Function.identity()); cluster.beforeTest(random(), 0.0); List roles = new ArrayList<>(); for (int i = 0; i < numNodes; i++) { @@ -473,11 +467,10 @@ public Settings transportClientSettings() { .put(NetworkModule.TRANSPORT_TYPE_KEY, transportClient).build(); } }; - boolean enableHttpPipelining = randomBoolean(); String nodePrefix = "test"; Path baseDir = createTempDir(); InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2, - "test", nodeConfigurationSource, 0, enableHttpPipelining, nodePrefix, + "test", nodeConfigurationSource, 0, nodePrefix, mockPlugins(), Function.identity()); try { cluster.beforeTest(random(), 0.0); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index ec448f14e9160..dab3d023f65d3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -214,7 +214,7 @@ protected void addDefaultSecurityTransportType(Settings.Builder builder, Setting mockPlugins.add(getTestTransportPlugin()); } remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, cluster2Name, - 
cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, + cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, mockPlugins, useSecurity ? getClientWrapper() : Function.identity()); remoteCluster.beforeTest(random(), 0.5); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java index 7002803a3d49c..96bba962237fe 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/RemoteIndexAuditTrailStartingTests.java @@ -117,7 +117,7 @@ public Settings nodeSettings(int nodeOrdinal) { } }; remoteCluster = new InternalTestCluster(randomLong(), createTempDir(), false, true, numNodes, numNodes, - cluster2Name, cluster2SettingsSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper()); + cluster2Name, cluster2SettingsSource, 0, SECOND_CLUSTER_NODE_PREFIX, getMockPlugins(), getClientWrapper()); remoteCluster.beforeTest(random(), 0.0); assertNoTimeout(remoteCluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get()); } From 739bb4f0eca6b7c802bf47e41b1d42d5c2508d66 Mon Sep 17 00:00:00 2001 From: Fernando Medina Corey Date: Tue, 22 May 2018 22:09:04 -0700 Subject: [PATCH 19/22] Fix a grammatical error in the 'search types' documentation. Simple grammatical fix. --- docs/reference/search/request/search-type.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc index 622b01c453e0a..7cac034f29c25 100644 --- a/docs/reference/search/request/search-type.asciidoc +++ b/docs/reference/search/request/search-type.asciidoc @@ -7,7 +7,7 @@ scattered to all the relevant shards and then all the results are gathered back. When doing scatter/gather type execution, there are several ways to do that, specifically with search engines. -One of the questions when executing a distributed search is how much +One of the questions when executing a distributed search is how many results to retrieve from each shard. For example, if we have 10 shards, the 1st shard might hold the most relevant results from 0 till 10, with other shards results ranking below it. For this reason, when executing a From 886db84ad21bf7666a5027bd1487c961ebf14b9c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 23 May 2018 08:55:21 +0200 Subject: [PATCH 20/22] Expose Lucene's FeatureField. (#30618) Lucene has a new `FeatureField` which gives the ability to record numeric features as term frequencies. Its main benefit is that it allows to boost queries with the values of these features and efficiently skip non-competitive documents at the same time using block-max WAND and indexed impacts. 
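At the Lucene level the building block looks roughly like the sketch below; this is a hedged illustration against the Lucene 7.4 FeatureField API, and the field name, feature name, weight and pivot values are all arbitrary:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.FeatureField;
    import org.apache.lucene.search.Query;

    class FeatureFieldSketch {
        Document docWithPagerank() {
            Document doc = new Document();
            // the value is folded into the term frequency of the "pagerank" term in the "_feature" field
            doc.add(new FeatureField("_feature", "pagerank", 8f));
            return doc;
        }

        Query pagerankBoost() {
            // scores roughly as S / (S + pivot); with block-max WAND, non-competitive docs can be skipped
            return FeatureField.newSaturationQuery("_feature", "pagerank", 1f, 10f);
        }
    }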
--- docs/reference/mapping/types.asciidoc | 4 +- docs/reference/mapping/types/feature.asciidoc | 59 +++ .../query-dsl/feature-query.asciidoc | 181 +++++++++ .../query-dsl/special-queries.asciidoc | 7 + .../index/mapper/FeatureFieldMapper.java | 248 ++++++++++++ .../index/mapper/FeatureMetaFieldMapper.java | 151 ++++++++ .../index/mapper/MapperExtrasPlugin.java | 18 +- .../index/query/FeatureQueryBuilder.java | 354 ++++++++++++++++++ .../index/mapper/FeatureFieldMapperTests.java | 173 +++++++++ .../index/mapper/FeatureFieldTypeTests.java | 46 +++ .../mapper/FeatureMetaFieldMapperTests.java | 58 +++ .../mapper/FeatureMetaFieldTypeTests.java | 29 ++ .../index/query/FeatureQueryBuilderTests.java | 130 +++++++ .../rest-api-spec/test/feature/10_basic.yml | 160 ++++++++ 14 files changed, 1616 insertions(+), 2 deletions(-) create mode 100644 docs/reference/mapping/types/feature.asciidoc create mode 100644 docs/reference/query-dsl/feature-query.asciidoc create mode 100644 modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java create mode 100644 modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java create mode 100644 modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java create mode 100644 modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java create mode 100644 modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 2cbc3a5bc54ad..ecb2e8dace23a 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -40,6 +40,8 @@ string:: <> and <> <>:: Defines parent/child relation for documents within the same index +<>:: Record numeric features to boost hits at query time. + [float] === Multi-fields @@ -86,6 +88,6 @@ include::types/percolator.asciidoc[] include::types/parent-join.asciidoc[] - +include::types/feature.asciidoc[] diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc new file mode 100644 index 0000000000000..3b5e78d5fb46b --- /dev/null +++ b/docs/reference/mapping/types/feature.asciidoc @@ -0,0 +1,59 @@ +[[feature]] +=== Feature datatype + +A `feature` field can index numbers so that they can later be used to boost +documents in queries with a <> query. 
+ +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> + } + } + } + } +} + +PUT my_index/_doc/1 +{ + "pagerank": 8, + "url_length": 22 +} + +GET my_index/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Feature fields must use the `feature` field type +<2> Features that correlate negatively with the score need to declare it + +NOTE: `feature` fields only support single-valued fields and strictly positive +values. Multi-valued fields and negative values will be rejected. + +NOTE: `feature` fields do not support querying, sorting or aggregating. They may +only be used within <> queries. + +NOTE: `feature` fields only preserve 9 significant bits for the precision, which +translates to a relative error of about 0.4%. + +Features that correlate negatively with the score should set +`positive_score_impact` to `false` (defaults to `true`). This will be used by +the <> query to modify the scoring formula +in such a way that the score decreases with the value of the feature instead of +increasing. For instance in web search, the url length is a commonly used +feature which correlates negatively with scores. diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc new file mode 100644 index 0000000000000..19c29b1cf3ab8 --- /dev/null +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -0,0 +1,181 @@ +[[query-dsl-feature-query]] +=== Feature Query + +The `feature` query is a specialized query that only works on +<> fields. Its goal is to boost the score of documents based +on the values of numeric features. It is typically put in a `should` clause of +a <> query so that its score is added to the score +of the query. + +Compared to using <> or other +ways to modify the score, this query has the benefit of being able to +efficiently skip non-competitive hits when +<> is set to `false`. Speedups may be +spectacular. + +Here is an example: + +[source,js] +-------------------------------------------------- +PUT test +{ + "mappings": { + "_doc": { + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + } + } + } + } +} + +PUT test/_doc/1 +{ + "pagerank": 10, + "url_length": 50 +} + +PUT test/_doc/2 +{ + "pagerank": 100, + "url_length": 20 +} + +POST test/_refresh + +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank" + } + } +} + +GET test/_search +{ + "query": { + "feature": { + "field": "url_length" + } + } +} +-------------------------------------------------- +// CONSOLE + +[float] +=== Supported functions + +The `feature` query supports 3 functions in order to boost scores using the +values of features. If you do not know where to start, we recommend that you +start with the `saturation` function, which is the default when no function is +provided. + +[float] +==== Saturation + +This function gives a score that is equal to `S / (S + pivot)` where `S` is the +value of the feature and `pivot` is a configurable pivot value so that the +result will be less than +0.5+ if `S` is less than pivot and greater than +0.5+ +otherwise. Scores are always is +(0, 1)+. 
+ +If the feature has a negative score impact then the function will be computed as +`pivot / (S + pivot)`, which decreases when `S` increases. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": { + "pivot": 8 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +If +pivot+ is not supplied then Elasticsearch will compute a default value that +will be approximately equal to the geometric mean of all feature values that +exist in the index. We recommend this if you haven't had the opportunity to +train a good pivot value. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "saturation": {} + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Logarithm + +This function gives a score that is equal to `log(scaling_factor + S)` where +`S` is the value of the feature and `scaling_factor` is a configurable scaling +factor. Scores are unbounded. + +This function only supports features that have a positive score impact. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "log": { + "scaling_factor": 4 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[float] +==== Sigmoid + +This function is an extension of `saturation` which adds a configurable +exponent. Scores are computed as `S^exp^ / (S^exp^ + pivot^exp^)`. Like for the +`saturation` function, `pivot` is the value of `S` that gives a score of +0.5+ +and scores are in +(0, 1)+. + +`exponent` must be positive, but is typically in +[0.5, 1]+. A good value should +be computed via traning. If you don't have the opportunity to do so, we recommend +that you stick to the `saturation` function instead. + +[source,js] +-------------------------------------------------- +GET test/_search +{ + "query": { + "feature": { + "field": "pagerank", + "sigmoid": { + "pivot": 7, + "exponent": 0.6 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index a062fa7ddb1fb..4c69889040eb1 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -19,6 +19,11 @@ This query allows a script to act as a filter. Also see the This query finds queries that are stored as documents that match with the specified document. +<>:: + +A query that computes scores based on the values of numeric features and is +able to efficiently skip non-competitive hits. + <>:: A query that accepts other queries as json or yaml string. 
@@ -29,4 +34,6 @@ include::script-query.asciidoc[] include::percolate-query.asciidoc[] +include::feature-query.asciidoc[] + include::wrapper-query.asciidoc[] diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java new file mode 100644 index 0000000000000..5b0158ff55b5f --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField}. 
+ */ +public class FeatureFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureFieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE.setHasDocValues(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends FieldMapper.Builder { + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + public Builder positiveScoreImpact(boolean v) { + fieldType().setPositiveScoreImpact(v); + return builder; + } + + @Override + public FeatureFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureFieldMapper( + name, fieldType, defaultFieldType, + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + } + } + + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + FeatureFieldMapper.Builder builder = new FeatureFieldMapper.Builder(name); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + String propName = entry.getKey(); + Object propNode = entry.getValue(); + if (propName.equals("positive_score_impact")) { + builder.positiveScoreImpact(XContentMapValues.nodeBooleanValue(propNode)); + iterator.remove(); + } + } + return builder; + } + } + + public static final class FeatureFieldType extends MappedFieldType { + + private boolean positiveScoreImpact = true; + + public FeatureFieldType() { + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + } + + protected FeatureFieldType(FeatureFieldType ref) { + super(ref); + this.positiveScoreImpact = ref.positiveScoreImpact; + } + + public FeatureFieldType clone() { + return new FeatureFieldType(this); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o) == false) { + return false; + } + FeatureFieldType other = (FeatureFieldType) o; + return Objects.equals(positiveScoreImpact, other.positiveScoreImpact); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + Objects.hashCode(positiveScoreImpact); + return h; + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + if (positiveScoreImpact != ((FeatureFieldType) other).positiveScoreImpact()) { + conflicts.add("mapper [" + name() + "] has different [positive_score_impact] values"); + } + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + public boolean positiveScoreImpact() { + return positiveScoreImpact; + } + + public void setPositiveScoreImpact(boolean positiveScoreImpact) { + checkIfFrozen(); + this.positiveScoreImpact = positiveScoreImpact; + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new TermQuery(new Term("_feature", name())); + } + + @Override + public Query nullValueQuery() { + if (nullValue() == null) { + return null; + } + return termQuery(nullValue(), null); + } + + @Override + public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { + failIfNoDocValues(); + return new 
DocValuesIndexFieldData.Builder(); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("Queries on [feature] fields are not supported"); + } + } + + private FeatureFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + } + + @Override + protected FeatureFieldMapper clone() { + return (FeatureFieldMapper) super.clone(); + } + + @Override + public FeatureFieldType fieldType() { + return (FeatureFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + float value; + if (context.externalValueSet()) { + Object v = context.externalValue(); + if (v instanceof Number) { + value = ((Number) v).floatValue(); + } else { + value = Float.parseFloat(v.toString()); + } + } else if (context.parser().currentToken() == Token.VALUE_NULL) { + // skip + return; + } else { + value = context.parser().floatValue(); + } + + if (context.doc().getByKey(name()) != null) { + throw new IllegalArgumentException("[feature] fields do not support indexing multiple values for the same field [" + name() + + "] in the same document"); + } + + if (fieldType().positiveScoreImpact() == false) { + value = 1 / value; + } + + context.doc().addWithKey(name(), new FeatureField("_feature", name(), value)); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + + if (includeDefaults || fieldType().nullValue() != null) { + builder.field("null_value", fieldType().nullValue()); + } + + if (includeDefaults || fieldType().positiveScoreImpact() == false) { + builder.field("positive_score_impact", fieldType().positiveScoreImpact()); + } + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java new file mode 100644 index 0000000000000..2102a029a6ad6 --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapper.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
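A quick worked note on the inversion in parseCreateField above, with numbers taken from the REST test added later in this patch: with "positive_score_impact": false the indexed value is 1 / value, so features where smaller is better still store larger values for better documents. A url_length of 20 is indexed as 1/20 = 0.05 while a url_length of 50 becomes 1/50 = 0.02, and since every score function introduced below is monotonic in the stored value, the document with the shorter URL ranks first, which is exactly what the "Negative saturation" and "Negative sigmoid" test cases assert.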
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * This meta field only exists because feature fields index everything into a + * common _feature field and Elasticsearch has a custom codec that complains + * when fields exist in the index and not in mappings. + */ +public class FeatureMetaFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_feature"; + + public static final String CONTENT_TYPE = "_feature"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureMetaFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS); + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + FIELD_TYPE.setName(NAME); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends MetadataFieldMapper.Builder { + + public Builder(MappedFieldType existing) { + super(NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); + } + + @Override + public FeatureMetaFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureMetaFieldMapper(fieldType, context.indexSettings()); + } + } + + public static class TypeParser implements MetadataFieldMapper.TypeParser { + @Override + public MetadataFieldMapper.Builder parse(String name, + Map node, ParserContext parserContext) throws MapperParsingException { + return new Builder(parserContext.mapperService().fullName(NAME)); + } + + @Override + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + if (fieldType != null) { + return new FeatureMetaFieldMapper(indexSettings, fieldType); + } else { + return parse(NAME, Collections.emptyMap(), context) + .build(new BuilderContext(indexSettings, new ContentPath(1))); + } + } + } + + public static final class FeatureMetaFieldType extends MappedFieldType { + + public FeatureMetaFieldType() { + } + + protected FeatureMetaFieldType(FeatureMetaFieldType ref) { + super(ref); + } + + @Override + public FeatureMetaFieldType clone() { + return new FeatureMetaFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("Cannot run exists query on [_feature]"); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("The [_feature] field may not be queried directly"); + } + } + + private FeatureMetaFieldMapper(Settings indexSettings, MappedFieldType existing) { + this(existing.clone(), indexSettings); + } + + private FeatureMetaFieldMapper(MappedFieldType fieldType, Settings indexSettings) { + super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); + } + + @Override + public void preParse(ParseContext context) throws IOException 
{} + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + throw new AssertionError("Should never be called"); + } + + @Override + public void postParse(ParseContext context) throws IOException {} + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder; + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index 2b249a5fe6e09..4a9aea21a8a53 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -19,21 +19,37 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.index.mapper.MetadataFieldMapper.TypeParser; +import org.elasticsearch.index.query.FeatureQueryBuilder; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; import java.util.Collections; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; -public class MapperExtrasPlugin extends Plugin implements MapperPlugin { +public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPlugin { @Override public Map getMappers() { Map mappers = new LinkedHashMap<>(); mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); + mappers.put(FeatureFieldMapper.CONTENT_TYPE, new FeatureFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); } + @Override + public Map getMetadataMappers() { + return Collections.singletonMap(FeatureMetaFieldMapper.CONTENT_TYPE, new FeatureMetaFieldMapper.TypeParser()); + } + + @Override + public List> getQueries() { + return Collections.singletonList( + new QuerySpec<>(FeatureQueryBuilder.NAME, FeatureQueryBuilder::new, p -> FeatureQueryBuilder.PARSER.parse(p, null))); + } + } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java new file mode 100644 index 0000000000000..761de46731dda --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java @@ -0,0 +1,354 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
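For illustration only, not part of this change: the query builder defined below can be constructed programmatically like any other QueryBuilder. The three score functions compute log(scaling_factor + S), S / (S + pivot) and S^exp / (S^exp + pivot^exp), where S is the per-document feature value; the field name "pagerank" and the pivot used here are illustrative.

    import org.elasticsearch.index.query.FeatureQueryBuilder;
    import org.elasticsearch.index.query.FeatureQueryBuilder.ScoreFunction;
    import org.elasticsearch.search.builder.SearchSourceBuilder;

    class FeatureQuerySketch {
        static SearchSourceBuilder rankByPagerank() {
            // Saturation scores a document as S / (S + pivot): with pivot = 20,
            // pagerank 100 contributes 100 / 120 ~= 0.83 and pagerank 10 contributes
            // 10 / 30 ~= 0.33, so larger values win with diminishing returns.
            // Leaving the function out falls back to saturation with a pivot derived
            // from the index statistics.
            return new SearchSourceBuilder()
                    .query(new FeatureQueryBuilder("pagerank", new ScoreFunction.Saturation(20f)));
        }
    }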
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.FeatureFieldMapper.FeatureFieldType; +import org.elasticsearch.index.mapper.MappedFieldType; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Query to run on a [feature] field. + */ +public final class FeatureQueryBuilder extends AbstractQueryBuilder { + + /** + * Scoring function for a [feature] field. + */ + public abstract static class ScoreFunction { + + private ScoreFunction() {} // prevent extensions by users + + abstract void writeTo(StreamOutput out) throws IOException; + + abstract Query toQuery(String feature, boolean positiveScoreImpact) throws IOException; + + abstract void doXContent(XContentBuilder builder) throws IOException; + + /** + * A scoring function that scores documents as {@code Math.log(scalingFactor + S)} + * where S is the value of the static feature. + */ + public static class Log extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "log", a -> new Log((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("scaling_factor")); + } + + private final float scalingFactor; + + public Log(float scalingFactor) { + this.scalingFactor = scalingFactor; + } + + private Log(StreamInput in) throws IOException { + this(in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Log that = (Log) obj; + return scalingFactor == that.scalingFactor; + } + + @Override + public int hashCode() { + return Float.hashCode(scalingFactor); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 0); + out.writeFloat(scalingFactor); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("log"); + builder.field("scaling_factor", scalingFactor); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (positiveScoreImpact == false) { + throw new IllegalArgumentException("Cannot use the [log] function with a field that has a negative score impact as " + + "it would trigger negative scores"); + } + return FeatureField.newLogQuery("_feature", feature, DEFAULT_BOOST, scalingFactor); + } + } + + /** + * A scoring function that scores documents as {@code S / (S + pivot)} where S is + * the value of the static feature. + */ + public static class Saturation extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "saturation", a -> new Saturation((Float) a[0])); + static { + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), new ParseField("pivot")); + } + + private final Float pivot; + + /** Constructor with a default pivot, computed as the geometric average of + * all feature values in the index. 
*/ + public Saturation() { + this((Float) null); + } + + public Saturation(float pivot) { + this(Float.valueOf(pivot)); + } + + private Saturation(Float pivot) { + this.pivot = pivot; + } + + private Saturation(StreamInput in) throws IOException { + this(in.readOptionalFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Saturation that = (Saturation) obj; + return Objects.equals(pivot, that.pivot); + } + + @Override + public int hashCode() { + return Objects.hashCode(pivot); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 1); + out.writeOptionalFloat(pivot); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("saturation"); + if (pivot != null) { + builder.field("pivot", pivot); + } + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + if (pivot == null) { + return FeatureField.newSaturationQuery("_feature", feature); + } else { + return FeatureField.newSaturationQuery("_feature", feature, DEFAULT_BOOST, pivot); + } + } + } + + /** + * A scoring function that scores documents as {@code S^exp / (S^exp + pivot^exp)} + * where S is the value of the static feature. + */ + public static class Sigmoid extends ScoreFunction { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "sigmoid", a -> new Sigmoid((Float) a[0], ((Float) a[1]).floatValue())); + static { + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("pivot")); + PARSER.declareFloat(ConstructingObjectParser.constructorArg(), new ParseField("exponent")); + } + + private final float pivot; + private final float exp; + + public Sigmoid(float pivot, float exp) { + this.pivot = pivot; + this.exp = exp; + } + + private Sigmoid(StreamInput in) throws IOException { + this(in.readFloat(), in.readFloat()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Sigmoid that = (Sigmoid) obj; + return pivot == that.pivot + && exp == that.exp; + } + + @Override + public int hashCode() { + return Objects.hash(pivot, exp); + } + + @Override + void writeTo(StreamOutput out) throws IOException { + out.writeByte((byte) 2); + out.writeFloat(pivot); + out.writeFloat(exp); + } + + @Override + void doXContent(XContentBuilder builder) throws IOException { + builder.startObject("sigmoid"); + builder.field("pivot", pivot); + builder.field("exponent", exp); + builder.endObject(); + } + + @Override + Query toQuery(String feature, boolean positiveScoreImpact) throws IOException { + return FeatureField.newSigmoidQuery("_feature", feature, DEFAULT_BOOST, pivot, exp); + } + } + } + + private static ScoreFunction readScoreFunction(StreamInput in) throws IOException { + byte b = in.readByte(); + switch (b) { + case 0: + return new ScoreFunction.Log(in); + case 1: + return new ScoreFunction.Saturation(in); + case 2: + return new ScoreFunction.Sigmoid(in); + default: + throw new IOException("Illegal score function id: " + b); + } + } + + public static ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "feature", args -> { + final String field = (String) args[0]; + final float boost = args[1] == null ? 
DEFAULT_BOOST : (Float) args[1]; + final String queryName = (String) args[2]; + long numNonNulls = Arrays.stream(args, 3, args.length).filter(Objects::nonNull).count(); + final FeatureQueryBuilder query; + if (numNonNulls > 1) { + throw new IllegalArgumentException("Can only specify one of [log], [saturation] and [sigmoid]"); + } else if (numNonNulls == 0) { + query = new FeatureQueryBuilder(field, new ScoreFunction.Saturation()); + } else { + ScoreFunction scoreFunction = (ScoreFunction) Arrays.stream(args, 3, args.length) + .filter(Objects::nonNull) + .findAny() + .get(); + query = new FeatureQueryBuilder(field, scoreFunction); + } + query.boost(boost); + query.queryName(queryName); + return query; + }); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("field")); + PARSER.declareFloat(ConstructingObjectParser.optionalConstructorArg(), BOOST_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Log.PARSER, new ParseField("log")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Saturation.PARSER, new ParseField("saturation")); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), + ScoreFunction.Sigmoid.PARSER, new ParseField("sigmoid")); + } + + public static final String NAME = "feature"; + + private final String field; + private final ScoreFunction scoreFunction; + + public FeatureQueryBuilder(String field, ScoreFunction scoreFunction) { + this.field = Objects.requireNonNull(field); + this.scoreFunction = Objects.requireNonNull(scoreFunction); + } + + public FeatureQueryBuilder(StreamInput in) throws IOException { + super(in); + this.field = in.readString(); + this.scoreFunction = readScoreFunction(in); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(field); + scoreFunction.writeTo(out); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(getName()); + builder.field("field", field); + scoreFunction.doXContent(builder); + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + final MappedFieldType ft = context.fieldMapper(field); + if (ft == null) { + return new MatchNoDocsQuery(); + } + if (ft instanceof FeatureFieldType == false) { + throw new IllegalArgumentException("[feature] query only works on [feature] fields, not [" + ft.typeName() + "]"); + } + final FeatureFieldType fft = (FeatureFieldType) ft; + return scoreFunction.toQuery(field, fft.positiveScoreImpact()); + } + + @Override + protected boolean doEquals(FeatureQueryBuilder other) { + return Objects.equals(field, other.field) && Objects.equals(scoreFunction, other.scoreFunction); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, scoreFunction); + } + +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java new file mode 100644 index 0000000000000..2e9fa98cbbe97 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldMapperTests.java @@ -0,0 +1,173 @@ +/* + * Licensed to 
Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Collection; + +public class FeatureFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + private static int getFrequency(TokenStream tk) throws IOException { + TermFrequencyAttribute freqAttribute = tk.addAttribute(TermFrequencyAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + int freq = freqAttribute.getTermFrequency(); + assertFalse(tk.incrementToken()); + return freq; + } + + public void testDefaults() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = 
getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); + } + + public void testNegativeScoreImpact() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature") + .field("positive_score_impact", false).endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 10) + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("_feature"); + assertEquals(1, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + + ParsedDocument doc2 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", 12) + .endObject()), + XContentType.JSON)); + + FeatureField featureField2 = (FeatureField) doc2.rootDoc().getFields("_feature")[0]; + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 > freq2); + } + + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().startObject("foo") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + MapperParsingException e = null;/*expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("field", Arrays.asList(10, 20)) + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [field] in the same document", + e.getCause().getMessage());*/ + + e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startArray("foo") + .startObject() + .field("field", 10) + .endObject() + .startObject() + .field("field", 20) + .endObject() + .endArray() + .endObject()), + XContentType.JSON))); + assertEquals("[feature] fields do not support indexing multiple values for the same field [foo.field] in the same document", + e.getCause().getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java new file mode 100644 index 0000000000000..9debd0736602c --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureFieldTypeTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.junit.Before; + +public class FeatureFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureFieldMapper.FeatureFieldType(); + } + + @Before + public void setupProperties() { + addModifier(new Modifier("positive_score_impact", false) { + @Override + public void modify(MappedFieldType ft) { + FeatureFieldMapper.FeatureFieldType tft = (FeatureFieldMapper.FeatureFieldType)ft; + tft.setPositiveScoreImpact(tft.positiveScoreImpact() == false); + } + @Override + public void normalizeOther(MappedFieldType other) { + super.normalizeOther(other); + ((FeatureFieldMapper.FeatureFieldType) other).setPositiveScoreImpact(true); + } + }); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java new file mode 100644 index 0000000000000..99697b1abaf58 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldMapperTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; + +import java.util.Collection; + +public class FeatureMetaFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + public void testBasics() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + assertNotNull(mapper.metadataMapper(FeatureMetaFieldMapper.class)); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java new file mode 100644 index 0000000000000..ef261573c9682 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureMetaFieldTypeTests.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +public class FeatureMetaFieldTypeTests extends FieldTypeTestCase { + + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureMetaFieldMapper.FeatureMetaFieldType(); + } + +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java new file mode 100644 index 0000000000000..883dce5f3858c --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.mapper.MapperExtrasPlugin; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.FeatureQueryBuilder.ScoreFunction; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.Matchers.either; + +public class FeatureQueryBuilderTests extends AbstractQueryTestCase { + + @Override + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + for (String type : getCurrentTypes()) { + mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type, + "my_feature_field", "type=feature", + "my_negative_feature_field", "type=feature,positive_score_impact=false"))), MapperService.MergeReason.MAPPING_UPDATE); + } + } + + @Override + protected Collection> getPlugins() { + return Collections.singleton(MapperExtrasPlugin.class); + } + + @Override + protected FeatureQueryBuilder doCreateTestQueryBuilder() { + ScoreFunction function; + switch (random().nextInt(3)) { + case 0: + function = new ScoreFunction.Log(1 + randomFloat()); + break; + case 1: + if (randomBoolean()) { + function = new ScoreFunction.Saturation(); + } else { + function = new ScoreFunction.Saturation(randomFloat()); + } + break; + case 2: + function = new ScoreFunction.Sigmoid(randomFloat(), randomFloat()); + break; + default: + throw new AssertionError(); + } + return new FeatureQueryBuilder("my_feature_field", function); + } + + @Override + protected void doAssertLuceneQuery(FeatureQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + Class expectedClass = FeatureField.newSaturationQuery("", "", 1, 1).getClass(); + assertThat(query, either(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(expectedClass))); + } + + @Override + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/30605") + public void testUnknownField() { + super.testUnknownField(); + } + + public void testDefaultScoreFunction() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"my_feature_field\"\n" + + " }\n" + + "}"; + Query parsedQuery = parseQuery(query).toQuery(createShardContext()); + assertEquals(FeatureField.newSaturationQuery("_feature", "my_feature_field"), parsedQuery); + } + + public void testIllegalField() throws IOException { + assumeTrue("test runs only when at least a type is 
registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"" + STRING_FIELD_NAME + "\"\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals("[feature] query only works on [feature] fields, not [text]", e.getMessage()); + } + + public void testIllegalCombination() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + String query = "{\n" + + " \"feature\" : {\n" + + " \"field\": \"my_negative_feature_field\",\n" + + " \"log\" : {\n" + + " \"scaling_factor\": 4.5\n" + + " }\n" + + " }\n" + + "}"; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parseQuery(query).toQuery(createShardContext())); + assertEquals( + "Cannot use the [log] function with a field that has a negative score impact as it would trigger negative scores", + e.getMessage()); + } +} diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml new file mode 100644 index 0000000000000..8318550876509 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature/10_basic.yml @@ -0,0 +1,160 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "The feature field/query was introduced in 7.0.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + pagerank: + type: feature + url_length: + type: feature + positive_score_impact: false + + - do: + index: + index: test + type: _doc + id: 1 + body: + pagerank: 10 + url_length: 50 + + - do: + index: + index: test + type: _doc + id: 2 + body: + pagerank: 100 + url_length: 20 + + - do: + indices.refresh: {} + +--- +"Positive log": + + - do: + search: + body: + query: + feature: + field: pagerank + log: + scaling_factor: 3 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive saturation": + + - do: + search: + body: + query: + feature: + field: pagerank + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Positive sigmoid": + + - do: + search: + body: + query: + feature: + field: pagerank + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative log": + + - do: + catch: bad_request + search: + body: + query: + feature: + field: url_length + log: + scaling_factor: 3 + +--- +"Negative saturation": + + - do: + search: + body: + query: + feature: + field: url_length + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Negative sigmoid": + + - do: + search: + body: + query: + feature: + field: url_length + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" From cceaa9a0f1213c2a1583c658644545df23b6a86d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 23 May 2018 08:56:32 +0200 Subject: [PATCH 21/22] Only ack cluster state updates successfully applied on all nodes (#30672) The cluster state acking mechanism currently incorrectly acks cluster state updates that have not successfully been applied on all nodes. 
In a situation, for example, where some of the nodes disconnect during publishing, and don't acknowledge receiving the new cluster state, the user-facing action (e.g. create index request) will still consider this as an ack. --- .../AckedClusterStateTaskListener.java | 6 ++- .../cluster/AckedClusterStateUpdateTask.java | 2 +- .../metadata/MetaDataMappingService.java | 2 +- .../cluster/service/MasterService.java | 16 +++---- .../ack/AckClusterUpdateSettingsIT.java | 42 +++++++++++++++++++ 5 files changed, 55 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java index 148a1dea3095f..a4767507ef1aa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateTaskListener.java @@ -25,7 +25,11 @@ public interface AckedClusterStateTaskListener extends ClusterStateTaskListener { /** - * Called to determine which nodes the acknowledgement is expected from + * Called to determine which nodes the acknowledgement is expected from. + * + * As this method will be called multiple times to determine the set of acking nodes, + * it is crucial for it to return consistent results: Given the same listener instance + * and the same node parameter, the method implementation should return the same result. * * @param discoveryNode a node * @return true if the node is expected to send ack back, false otherwise diff --git a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java index faf2f30bb3ed4..8d61fe964265d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/AckedClusterStateUpdateTask.java @@ -61,7 +61,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { * @param e optional error that might have been thrown */ public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(newResponse(true)); + listener.onResponse(newResponse(e == null)); } protected abstract Response newResponse(boolean acknowledged); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index b8e898cf6f5e3..82d947b4158a2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -363,7 +363,7 @@ public boolean mustAck(DiscoveryNode discoveryNode) { @Override public void onAllNodesAcked(@Nullable Exception e) { - listener.onResponse(new ClusterStateUpdateResponse(true)); + listener.onResponse(new ClusterStateUpdateResponse(e == null)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index 54a6568af3fa2..1757548c28b09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -563,7 +563,7 @@ private static class AckCountDownListener implements Discovery.AckListener { private final AckedClusterStateTaskListener ackedTaskListener; private final CountDown countDown; - private final 
DiscoveryNodes nodes; + private final DiscoveryNode masterNode; private final long clusterStateVersion; private final Future ackTimeoutCallback; private Exception lastFailure; @@ -572,15 +572,14 @@ private static class AckCountDownListener implements Discovery.AckListener { ThreadPool threadPool) { this.ackedTaskListener = ackedTaskListener; this.clusterStateVersion = clusterStateVersion; - this.nodes = nodes; + this.masterNode = nodes.getMasterNode(); int countDown = 0; for (DiscoveryNode node : nodes) { - if (ackedTaskListener.mustAck(node)) { + //we always wait for at least the master node + if (node.equals(masterNode) || ackedTaskListener.mustAck(node)) { countDown++; } } - //we always wait for at least 1 node (the master) - countDown = Math.max(1, countDown); logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); this.countDown = new CountDown(countDown); this.ackTimeoutCallback = threadPool.schedule(ackedTaskListener.ackTimeout(), ThreadPool.Names.GENERIC, () -> onTimeout()); @@ -588,11 +587,8 @@ private static class AckCountDownListener implements Discovery.AckListener { @Override public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (!ackedTaskListener.mustAck(node)) { - //we always wait for the master ack anyway - if (!node.equals(nodes.getMasterNode())) { - return; - } + if (node.equals(masterNode) == false && ackedTaskListener.mustAck(node) == false) { + return; } if (e == null) { logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index ab3f82fff75f5..a11ceddf28788 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; @@ -33,8 +34,16 @@ import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.PublishClusterStateAction; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.stream.Stream; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -43,6 +52,11 @@ @ClusterScope(scope = TEST, minNumDataNodes = 2) public class AckClusterUpdateSettingsIT extends ESIntegTestCase { + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + @Override protected 
Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -156,4 +170,32 @@ public void testOpenIndexNoAcknowledgement() { assertThat(openIndexResponse.isAcknowledged(), equalTo(false)); ensureGreen("test"); // make sure that recovery from disk has completed, so that check index doesn't fail. } + + public void testAckingFailsIfNotPublishedToAllNodes() { + String masterNode = internalCluster().getMasterName(); + String nonMasterNode = Stream.of(internalCluster().getNodeNames()) + .filter(node -> node.equals(masterNode) == false).findFirst().get(); + + MockTransportService masterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, masterNode); + MockTransportService nonMasterTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, nonMasterNode); + + logger.info("blocking cluster state publishing from master [{}] to non master [{}]", masterNode, nonMasterNode); + if (randomBoolean() && internalCluster().numMasterNodes() != 2) { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.SEND_ACTION_NAME); + } else { + masterTransportService.addFailToSendNoConnectRule(nonMasterTransportService, PublishClusterStateAction.COMMIT_ACTION_NAME); + } + + CreateIndexResponse response = client().admin().indices().prepareCreate("test").get(); + assertFalse(response.isAcknowledged()); + + logger.info("waiting for cluster to reform"); + masterTransportService.clearRule(nonMasterTransportService); + + ensureStableCluster(internalCluster().size()); + + assertAcked(client().admin().indices().prepareDelete("test")); + } } From 30b004f582fa44d630fe1a910089d0821e6fcc26 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 23 May 2018 09:02:01 +0200 Subject: [PATCH 22/22] Use original settings on full-cluster restart (#30780) When doing a node restart using the test framework, the restarted node does not only use the settings provided to the original node, but also additional settings provided by plugin extensions, which does not correspond to the settings that a node would have on a true restart. 
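For illustration only, not part of this change: the situation described above arises because plugins may contribute derived settings at startup via Plugin#additionalSettings. If the test framework rebuilt a restarted node from node.settings(), those derived values would come back as if the user had configured them; building from node.originalSettings() avoids that. A hypothetical plugin (name and attribute invented here) showing the pattern:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.plugins.Plugin;

    public class ExampleAttrPlugin extends Plugin {
        @Override
        public Settings additionalSettings() {
            // On a genuine restart this attribute is absent and gets derived again.
            // Restarting from the augmented node.settings() would hand it back to the
            // plugin as pre-existing configuration, the clash that the
            // NodeAttrCheckPlugin added below is designed to detect.
            return Settings.builder().put("node.attr.example", true).build();
        }
    }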
--- .../java/org/elasticsearch/node/Node.java | 11 +++++++- .../test/InternalTestCluster.java | 2 +- .../test/test/InternalTestClusterTests.java | 26 ++++++++++++++++++- .../xpack/ml/MachineLearning.java | 8 ++---- 4 files changed, 38 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 054b91dc51101..44ecb6b04d627 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -230,6 +230,7 @@ public static final Settings addNodeNameIfNeeded(Settings settings, final String private final Lifecycle lifecycle = new Lifecycle(); private final Injector injector; private final Settings settings; + private final Settings originalSettings; private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; @@ -260,6 +261,7 @@ protected Node(final Environment environment, Collection logger.info("initializing ..."); } try { + originalSettings = environment.settings(); Settings tmpSettings = Settings.builder().put(environment.settings()) .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build(); @@ -563,7 +565,14 @@ protected void processRecoverySettings(ClusterSettings clusterSettings, Recovery } /** - * The settings that were used to create the node. + * The original settings that were used to create the node + */ + public Settings originalSettings() { + return originalSettings; + } + + /** + * The settings that are used by this node. Contains original settings as well as additional settings provided by plugins. */ public Settings settings() { return this.settings; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c786107361671..efe775f7415c2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -906,7 +906,7 @@ private void clearDataIfNeeded(RestartCallback callback) throws IOException { private void createNewNode(final Settings newSettings) { final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; // use a new seed to make sure we have new node id - Settings finalSettings = Settings.builder().put(node.settings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); + Settings finalSettings = Settings.builder().put(node.originalSettings()).put(newSettings).put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed).build(); if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) { throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " is not configured after restart of [" + name + "]"); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java index e25f917d69958..23f44c560baeb 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -469,9 +469,11 @@ public Settings transportClientSettings() { }; String nodePrefix = "test"; Path baseDir = createTempDir(); + List> plugins = new ArrayList<>(mockPlugins()); + 
plugins.add(NodeAttrCheckPlugin.class); InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, false, true, 2, 2, "test", nodeConfigurationSource, 0, nodePrefix, - mockPlugins(), Function.identity()); + plugins, Function.identity()); try { cluster.beforeTest(random(), 0.0); assertMMNinNodeSetting(cluster, 2); @@ -502,4 +504,26 @@ public Settings onNodeStopped(String nodeName) throws Exception { cluster.close(); } } + + /** + * Plugin that adds a simple node attribute as setting and checks if that node attribute is not already defined. + * Allows to check that the full-cluster restart logic does not copy over plugin-derived settings. + */ + public static class NodeAttrCheckPlugin extends Plugin { + + private final Settings settings; + + public NodeAttrCheckPlugin(Settings settings) { + this.settings = settings; + } + + @Override + public Settings additionalSettings() { + if (settings.get("node.attr.dummy") != null) { + fail("dummy setting already exists"); + } + return Settings.builder().put("node.attr.dummy", true).build(); + } + + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index bdefabdb294e5..a1714a8e3f5db 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -316,12 +316,8 @@ public Settings additionalSettings() { } private void addMlNodeAttribute(Settings.Builder additionalSettings, String attrName, String value) { - // Unfortunately we cannot simply disallow any value, because the internal cluster integration - // test framework will restart nodes with settings copied from the node immediately before it - // was stopped. The best we can do is reject inconsistencies, and report this in a way that - // makes clear that setting the node attribute directly is not allowed. String oldValue = settings.get(attrName); - if (oldValue == null || oldValue.equals(value)) { + if (oldValue == null) { additionalSettings.put(attrName, value); } else { reportClashingNodeAttribute(attrName); @@ -487,7 +483,7 @@ public List getRestHandlers(Settings settings, RestController restC new RestStartDatafeedAction(settings, restController), new RestStopDatafeedAction(settings, restController), new RestDeleteModelSnapshotAction(settings, restController), - new RestDeleteExpiredDataAction(settings, restController), + new RestDeleteExpiredDataAction(settings, restController), new RestForecastJobAction(settings, restController), new RestGetCalendarsAction(settings, restController), new RestPutCalendarAction(settings, restController),