diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 609a7cf2ea66f..c3956ca9d40a7 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -505,8 +505,6 @@ - - diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 6009021da14ed..7aedd395b93b5 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.4.0-snapshot-1cbadda4d3 +lucene = 7.4.0-snapshot-0a7c3f462f # optional dependencies spatial4j = 0.7 diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 8567ed63b3418..b38a554d6815b 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -63,7 +63,7 @@ include::{xes-repo-dir}/monitoring/index.asciidoc[] include::{xes-repo-dir}/rollup/index.asciidoc[] -include::{xes-repo-dir}/rest-api/index.asciidoc[] +include::rest-api/index.asciidoc[] include::{xes-repo-dir}/commands/index.asciidoc[] diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 937fcdae5fed6..765b54b588359 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -254,7 +254,6 @@ would look like this: } } -**** ===== Grouping diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc new file mode 100644 index 0000000000000..8c58246a0a658 --- /dev/null +++ b/docs/reference/rest-api/index.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[[xpack-api]] += {xpack} APIs + +[partintro] +-- +{xpack} exposes REST APIs that are used by the UI components and can be called +directly to configure and access {xpack} features. 
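+
+For example, any of these APIs can be called directly with a plain HTTP client. The
+following is an illustrative Java sketch (not part of this change); it assumes a node
+listening on `localhost:9200` and uses only the JDK:
+
+[source,java]
+--------------------------------------------------
+import java.io.InputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+
+public class XPackInfoExample {
+    public static void main(String[] args) throws Exception {
+        // GET _xpack reports build, license, and feature information.
+        URL url = new URL("http://localhost:9200/_xpack");
+        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+        connection.setRequestMethod("GET");
+        try (InputStream body = connection.getInputStream()) {
+            System.out.println(new String(body.readAllBytes(), StandardCharsets.UTF_8));
+        }
+    }
+}
+--------------------------------------------------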
+ +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +-- + + +include::{xes-repo-dir}/rest-api/info.asciidoc[] +include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] +include::{xes-repo-dir}/rest-api/licensing.asciidoc[] +include::{xes-repo-dir}/rest-api/migration.asciidoc[] +include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] +include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{xes-repo-dir}/rest-api/security.asciidoc[] +include::{xes-repo-dir}/rest-api/watcher.asciidoc[] +include::{xes-repo-dir}/rest-api/defs.asciidoc[] diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..25e2291d36e8b --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +bf2cfa0551ebdf08a2cf3079f3c74643bd9dbb76 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 3bbaa2ba0a715..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -98c920972b2f5e8563540e805d87e6a3bc888972 \ No newline at end of file diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 084f29b8c9a87..1be1acb1317dc 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -131,6 +131,7 @@ public void testXContentRoundtrip() throws IOException { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31104") public void testXContentParsingIsNotLenient() throws IOException { RatedRequest testItem = createTestItem(randomBoolean()); XContentType xContentType = randomFrom(XContentType.values()); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java index 9c81c07e66314..939d5540ecfdf 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java @@ -76,7 +76,8 @@ public boolean isCorsSupportEnabled() { } /** - * Determines whether a wildcard origin, '*', is supported. + * Determines whether a wildcard origin, '*', is supported. This also means that null origins are + * supported. * * @return {@code boolean} true if any origin is allowed. */ @@ -121,21 +122,21 @@ public boolean isNullOriginAllowed() { } /** - * Determines if cookies are supported for CORS requests. + * Determines if credentials are supported for CORS requests. * - * By default cookies are not included in CORS requests but if isCredentialsAllowed returns - * true cookies will be added to CORS requests. Setting this value to true will set the + * By default credentials are not included in CORS requests but if isCredentialsAllowed returns + * true credentials will be added to CORS requests. 
Setting this value to true will set the * CORS 'Access-Control-Allow-Credentials' response header to true. * - * Please note that cookie support needs to be enabled on the client side as well. - * The client needs to opt-in to send cookies by calling: + * Please note that credentials support needs to be enabled on the client side as well. + * The client needs to opt-in to send credentials by calling: *
      * xhr.withCredentials = true;
      * 
- * The default value for 'withCredentials' is false in which case no cookies are sent. - * Setting this to true will included cookies in cross origin requests. + * The default value for 'withCredentials' is false in which case no credentials are sent. + * Setting this to true will include credentials in cross origin requests. * - * @return {@code true} if cookies are supported. + * @return {@code true} if credentials are supported. */ public boolean isCredentialsAllowed() { return allowCredentials; } diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..3fdd3366122cb --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +82d83fcac1d9c8948aa0247fc9c87f177ddbd59b \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 7f3d3b5ccf63c..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -844e2b76f4bc6e646e1c3257d668ac598e03f36a \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..855d6ebe4aeb0 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +73fd4364f2931e7c8303b5927e140a7d21116c36 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 65423fff2a441..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f2bd2d67c7952e4ae14ab3f742824a45d0d1719 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..091097f1a8477 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +0a2c4417fa9a8be078864f590a5a66b98d551cf5 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 04fa62ce64a1d..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46ad7ebcfcdbdb60dd54aae4d720356a7a51c7c0 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..b18addf0b5819 --- /dev/null +++ 
b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +6fa179924f139a30fc0e5399256e1a44562ed32b \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 55bc8869196e0..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -548e9f2b4d4a985dc174b2eee4007c0bd5642e68 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..7b7141b6f407c --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +5ed135d34d7868b71a725257a46dc8d8735a15d4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index be66854321699..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b90e66f4104f0234cfef335762f65a6fed695231 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..73be96c477eab --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +875911b36b99c2103719f94559878a0ecb862fb6 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index b77acdc34f31c..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -929a4eb52b11f6d3f0df9c8eba014f5ee2464c67 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..0c85d3f6c8522 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +e7191628df8cb72382a20da79224aef677117849 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index cce4b6ff18df5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e6575a411b65cd95e0e54f04d3da278b68be521 \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java 
b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 681736a311db5..49e560363089b 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -36,6 +36,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; @@ -50,6 +52,8 @@ import java.util.List; import java.util.function.BiConsumer; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; + public class HttpReadWriteHandler implements ReadWriteHandler { private final NettyAdaptor adaptor; @@ -57,14 +61,16 @@ public class HttpReadWriteHandler implements ReadWriteHandler { private final NioHttpServerTransport transport; private final HttpHandlingSettings settings; private final NamedXContentRegistry xContentRegistry; + private final NioCorsConfig corsConfig; private final ThreadContext threadContext; HttpReadWriteHandler(NioSocketChannel nioChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, - NamedXContentRegistry xContentRegistry, ThreadContext threadContext) { + NamedXContentRegistry xContentRegistry, NioCorsConfig corsConfig, ThreadContext threadContext) { this.nioChannel = nioChannel; this.transport = transport; this.settings = settings; this.xContentRegistry = xContentRegistry; + this.corsConfig = corsConfig; this.threadContext = threadContext; List handlers = new ArrayList<>(5); @@ -78,6 +84,9 @@ public class HttpReadWriteHandler implements ReadWriteHandler { if (settings.isCompression()) { handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); } + if (settings.isCorsEnabled()) { + handlers.add(new NioCorsHandler(corsConfig)); + } handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); @@ -178,7 +187,7 @@ private void handleRequest(Object msg) { int sequence = pipelinedRequest.getSequence(); BigArrays bigArrays = transport.getBigArrays(); try { - innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, corsConfig, threadContext); } catch (final IllegalArgumentException e) { if (badRequestCause == null) { badRequestCause = e; @@ -191,7 +200,7 @@ private void handleRequest(Object msg) { Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters copiedRequest.uri(), copiedRequest); - innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, corsConfig, threadContext); } channel = innerChannel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java index 61cafed86a51c..634421b34ea48 100644 --- 
a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -41,6 +41,8 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; @@ -58,17 +60,19 @@ public class NioHttpChannel extends AbstractRestChannel { private final BigArrays bigArrays; private final int sequence; + private final NioCorsConfig corsConfig; private final ThreadContext threadContext; private final FullHttpRequest nettyRequest; private final NioSocketChannel nioChannel; private final boolean resetCookies; NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence, - HttpHandlingSettings settings, ThreadContext threadContext) { + HttpHandlingSettings settings, NioCorsConfig corsConfig, ThreadContext threadContext) { super(request, settings.getDetailedErrorsEnabled()); this.nioChannel = nioChannel; this.bigArrays = bigArrays; this.sequence = sequence; + this.corsConfig = corsConfig; this.threadContext = threadContext; this.nettyRequest = request.getRequest(); this.resetCookies = settings.isResetCookies(); @@ -87,6 +91,8 @@ public void sendResponse(RestResponse response) { } resp.setStatus(getStatus(response.status())); + NioCorsHandler.setCorsResponseHeaders(nettyRequest, resp, corsConfig); + String opaque = nettyRequest.headers().get("X-Opaque-Id"); if (opaque != null) { setHeaderField(resp, "X-Opaque-Id", opaque); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index de5c166de3f5b..ce0ed83aad420 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.http.nio; +import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.timeout.ReadTimeoutException; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -28,6 +29,7 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -38,11 +40,13 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; -import org.elasticsearch.http.AbstractHttpServerTransport; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import 
org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesChannelContext; @@ -56,6 +60,7 @@ import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -64,15 +69,23 @@ import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.regex.Pattern; import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -86,6 +99,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +import static org.elasticsearch.http.nio.cors.NioCorsHandler.ANY_ORIGIN; public class NioHttpServerTransport extends AbstractHttpServerTransport { @@ -115,6 +129,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private final Set socketChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private NioGroup nioGroup; private HttpChannelFactory channelFactory; + private final NioCorsConfig corsConfig; public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, HttpServerTransport.Dispatcher dispatcher) { @@ -136,6 +151,7 @@ public NioHttpServerTransport(Settings settings, NetworkService networkService, SETTING_HTTP_COMPRESSION_LEVEL.get(settings), SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), pipeliningMaxEvents); + this.corsConfig = buildCorsConfig(settings); this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); @@ -279,6 +295,38 @@ protected void nonChannelExceptionCaught(Exception ex) { logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), ex); } + static NioCorsConfig buildCorsConfig(Settings settings) { + if (SETTING_CORS_ENABLED.get(settings) == false) { + return NioCorsConfigBuilder.forOrigins().disable().build(); + } + String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings); 
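+        // Added note (an assumption drawn from the branches below, not stated in this PR):
+        // the http.cors.allow-origin setting may be empty (no origins allowed), the "*"
+        // wildcard, a comma-separated list of literal origins, or a pattern such as
+        // "/https?:\/\/localhost(:[0-9]+)?/" that RestUtils.checkCorsSettingForRegex
+        // recognizes and compiles into a Pattern.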
+ final NioCorsConfigBuilder builder; + if (Strings.isNullOrEmpty(origin)) { + builder = NioCorsConfigBuilder.forOrigins(); + } else if (origin.equals(ANY_ORIGIN)) { + builder = NioCorsConfigBuilder.forAnyOrigin(); + } else { + Pattern p = RestUtils.checkCorsSettingForRegex(origin); + if (p == null) { + builder = NioCorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin)); + } else { + builder = NioCorsConfigBuilder.forPattern(p); + } + } + if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) { + builder.allowCredentials(); + } + String[] strMethods = Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_METHODS.get(settings), ","); + HttpMethod[] methods = Arrays.stream(strMethods) + .map(HttpMethod::valueOf) + .toArray(HttpMethod[]::new); + return builder.allowedRequestMethods(methods) + .maxAge(SETTING_CORS_MAX_AGE.get(settings)) + .allowedRequestHeaders(Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_HEADERS.get(settings), ",")) + .shortCircuit() + .build(); + } + private void closeChannels(List channels) { List> futures = new ArrayList<>(channels.size()); @@ -315,7 +363,7 @@ private HttpChannelFactory() { public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { NioSocketChannel nioChannel = new NioSocketChannel(channel); HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel,NioHttpServerTransport.this, - httpHandlingSettings, xContentRegistry, threadPool.getThreadContext()); + httpHandlingSettings, xContentRegistry, corsConfig, threadPool.getThreadContext()); Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); SocketChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, httpReadWritePipeline, InboundChannelBuffer.allocatingInstance()); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java new file mode 100644 index 0000000000000..9848c26022e37 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; + +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; + +/** + * Configuration for Cross-Origin Resource Sharing (CORS). 
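+ *
+ * A minimal usage sketch (illustrative only, using the builder methods defined in this
+ * package):
+ *
+ *     NioCorsConfig corsConfig = NioCorsConfigBuilder.forOrigins("https://example.com")
+ *         .allowCredentials()
+ *         .allowedRequestMethods(HttpMethod.GET, HttpMethod.POST)
+ *         .maxAge(1800)
+ *         .build();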
+ * + * This class was lifted from the Netty project: + * https://github.com/netty/netty + */ +public final class NioCorsConfig { + + private final Optional> origins; + private final Optional pattern; + private final boolean anyOrigin; + private final boolean enabled; + private final boolean allowCredentials; + private final long maxAge; + private final Set allowedRequestMethods; + private final Set allowedRequestHeaders; + private final boolean allowNullOrigin; + private final Map> preflightHeaders; + private final boolean shortCircuit; + + NioCorsConfig(final NioCorsConfigBuilder builder) { + origins = builder.origins.map(s -> new LinkedHashSet<>(s)); + pattern = builder.pattern; + anyOrigin = builder.anyOrigin; + enabled = builder.enabled; + allowCredentials = builder.allowCredentials; + maxAge = builder.maxAge; + allowedRequestMethods = builder.requestMethods; + allowedRequestHeaders = builder.requestHeaders; + allowNullOrigin = builder.allowNullOrigin; + preflightHeaders = builder.preflightHeaders; + shortCircuit = builder.shortCircuit; + } + + /** + * Determines if support for CORS is enabled. + * + * @return {@code true} if support for CORS is enabled, false otherwise. + */ + public boolean isCorsSupportEnabled() { + return enabled; + } + + /** + * Determines whether a wildcard origin, '*', is supported. This also means that null origins are + * supported. + * + * @return {@code boolean} true if any origin is allowed. + */ + public boolean isAnyOriginSupported() { + return anyOrigin; + } + + /** + * Returns the set of allowed origins. + * + * @return {@code Set} the allowed origins. + */ + public Optional> origins() { + return origins; + } + + /** + * Returns whether the input origin is allowed by this configuration. + * + * @return {@code true} if the origin is allowed, otherwise {@code false} + */ + public boolean isOriginAllowed(final String origin) { + if (origins.isPresent()) { + return origins.get().contains(origin); + } else if (pattern.isPresent()) { + return pattern.get().matcher(origin).matches(); + } + return false; + } + + /** + * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded + * from the local file system. + * + * If isNullOriginAllowed is true then the server will respond with the wildcard for the + * CORS response header 'Access-Control-Allow-Origin'. + * + * @return {@code true} if a 'null' origin should be supported. + */ + public boolean isNullOriginAllowed() { + return allowNullOrigin; + } + + /** + * Determines if credentials are supported for CORS requests. + * + * By default credentials are not included in CORS requests but if isCredentialsAllowed returns + * true credentials will be added to CORS requests. Setting this value to true will set the + * CORS 'Access-Control-Allow-Credentials' response header to true. + * + * Please note that credentials support needs to be enabled on the client side as well. + * The client needs to opt-in to send credentials by calling: +
+     * xhr.withCredentials = true;
+     * 
+ * The default value for 'withCredentials' is false in which case no credentials are sent. + * Setting this to true will include credentials in cross origin requests. + * + * @return {@code true} if credentials are supported. + */ + public boolean isCredentialsAllowed() { + return allowCredentials; + } + + /** + * Gets the maxAge setting. + * + * When making a preflight request the client has to perform two requests, which can be inefficient. + * This setting will set the CORS 'Access-Control-Max-Age' response header and enables the + * caching of the preflight response for the specified time. During this time no preflight + * request will be made. + * + * @return {@code long} the time in seconds that a preflight request may be cached. + */ + public long maxAge() { + return maxAge; + } + + /** + * Returns the allowed set of Request Methods. The Http methods that should be returned in the + * CORS 'Access-Control-Allow-Methods' response header. + * + * @return {@code Set} of {@link HttpMethod}s that represent the allowed Request Methods. + */ + public Set allowedRequestMethods() { + return Collections.unmodifiableSet(allowedRequestMethods); + } + + /** + * Returns the allowed set of Request Headers. + * + * The header names returned from this method will be used to set the CORS + * 'Access-Control-Allow-Headers' response header. + * + * @return {@code Set} of strings that represent the allowed Request Headers. + */ + public Set allowedRequestHeaders() { + return Collections.unmodifiableSet(allowedRequestHeaders); + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * @return {@link HttpHeaders} the HTTP response headers to be added. + */ + public HttpHeaders preflightResponseHeaders() { + if (preflightHeaders.isEmpty()) { + return EmptyHttpHeaders.INSTANCE; + } + final HttpHeaders preflightHeaders = new DefaultHttpHeaders(); + for (Map.Entry> entry : this.preflightHeaders.entrySet()) { + final Object value = getValue(entry.getValue()); + if (value instanceof Iterable) { + preflightHeaders.add(entry.getKey().toString(), (Iterable) value); + } else { + preflightHeaders.add(entry.getKey().toString(), value); + } + } + return preflightHeaders; + } + + /** + * Determines whether a CORS request should be rejected if it is invalid, before any + * further processing. + * + * CORS headers are set after a request is processed. This may not always be desired, + * and this setting will check that the Origin is valid; if it is not valid, no + * further processing will take place and an error will be returned to the calling client. + * + * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header. 
+ */ + public boolean isShortCircuit() { + return shortCircuit; + } + + private static T getValue(final Callable callable) { + try { + return callable.call(); + } catch (final Exception e) { + throw new IllegalStateException("Could not generate value for callable [" + callable + ']', e); + } + } + + @Override + public String toString() { + return "CorsConfig[enabled=" + enabled + + ", origins=" + origins + + ", anyOrigin=" + anyOrigin + + ", isCredentialsAllowed=" + allowCredentials + + ", maxAge=" + maxAge + + ", allowedRequestMethods=" + allowedRequestMethods + + ", allowedRequestHeaders=" + allowedRequestHeaders + + ", preflightHeaders=" + preflightHeaders + ']'; + } + +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java new file mode 100644 index 0000000000000..333e4931aa1f1 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java @@ -0,0 +1,357 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.handler.codec.http.HttpMethod; + +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; + +/** + * Builder used to configure and build a {@link NioCorsConfig} instance. + * + * This class was lifted from the Netty project: + * https://github.com/netty/netty + */ +public final class NioCorsConfigBuilder { + + /** + * Creates a Builder instance with its origin set to '*'. + * + * @return Builder to support method chaining. + */ + public static NioCorsConfigBuilder forAnyOrigin() { + return new NioCorsConfigBuilder(); + } + + /** + * Creates a {@link NioCorsConfigBuilder} instance with the specified origin. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public static NioCorsConfigBuilder forOrigin(final String origin) { + if ("*".equals(origin)) { + return new NioCorsConfigBuilder(); + } + return new NioCorsConfigBuilder(origin); + } + + + /** + * Creates a {@link NioCorsConfigBuilder} instance with the specified pattern origin. + * + * @param pattern the regular expression pattern to match incoming origins on. + * @return {@link NioCorsConfigBuilder} with the configured origin pattern. 
+ */ + public static NioCorsConfigBuilder forPattern(final Pattern pattern) { + if (pattern == null) { + throw new IllegalArgumentException("CORS pattern cannot be null"); + } + return new NioCorsConfigBuilder(pattern); + } + + /** + * Creates a {@link NioCorsConfigBuilder} instance with the specified origins. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public static NioCorsConfigBuilder forOrigins(final String... origins) { + return new NioCorsConfigBuilder(origins); + } + + Optional> origins; + Optional pattern; + final boolean anyOrigin; + boolean allowNullOrigin; + boolean enabled = true; + boolean allowCredentials; + long maxAge; + final Set requestMethods = new HashSet<>(); + final Set requestHeaders = new HashSet<>(); + final Map> preflightHeaders = new HashMap<>(); + private boolean noPreflightHeaders; + boolean shortCircuit; + + /** + * Creates a new Builder instance with the origin passed in. + * + * @param origins the origin to be used for this builder. + */ + NioCorsConfigBuilder(final String... origins) { + this.origins = Optional.of(new LinkedHashSet<>(Arrays.asList(origins))); + pattern = Optional.empty(); + anyOrigin = false; + } + + /** + * Creates a new Builder instance allowing any origin, "*", which is the + * wildcard origin. + * + */ + NioCorsConfigBuilder() { + anyOrigin = true; + origins = Optional.empty(); + pattern = Optional.empty(); + } + + /** + * Creates a new Builder instance allowing any origin that matches the pattern. + * + * @param pattern the pattern to match against for incoming origins. + */ + NioCorsConfigBuilder(final Pattern pattern) { + this.pattern = Optional.of(pattern); + origins = Optional.empty(); + anyOrigin = false; + } + + /** + * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded + * from the local file system. Calling this method will enable a successful CORS response + * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + NioCorsConfigBuilder allowNullOrigin() { + allowNullOrigin = true; + return this; + } + + /** + * Disables CORS support. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder disable() { + enabled = false; + return this; + } + + /** + * By default cookies are not included in CORS requests, but this method will enable cookies to + * be added to CORS requests. Calling this method will set the CORS 'Access-Control-Allow-Credentials' + * response header to true. + * + * Please note that cookie support needs to be enabled on the client side as well. + * The client needs to opt-in to send cookies by calling: +
+     * xhr.withCredentials = true;
+     * 
+ * The default value for 'withCredentials' is false in which case no cookies are sent. + * Setting this to true will include cookies in cross origin requests. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowCredentials() { + allowCredentials = true; + return this; + } + + /** + * When making a preflight request the client has to perform two requests, which can be inefficient. + * This setting will set the CORS 'Access-Control-Max-Age' response header and enables the + * caching of the preflight response for the specified time. During this time no preflight + * request will be made. + * + * @param max the maximum time, in seconds, that the preflight response may be cached. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder maxAge(final long max) { + maxAge = max; + return this; + } + + /** + * Specifies the allowed set of HTTP Request Methods that should be returned in the + * CORS 'Access-Control-Allow-Methods' response header. + * + * @param methods the {@link HttpMethod}s that should be allowed. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowedRequestMethods(final HttpMethod... methods) { + requestMethods.addAll(Arrays.asList(methods)); + return this; + } + + /** + * Specifies the headers that should be returned in the CORS 'Access-Control-Allow-Headers' + * response header. + * + * If a client specifies headers on the request, for example by calling: +
+     * xhr.setRequestHeader('My-Custom-Header', "SomeValue");
+     * 
+ * the server will receive the above header name in the 'Access-Control-Request-Headers' of the + * preflight request. The server will then decide if it allows this header to be sent for the + * real request (remember that a preflight is not the real request but a request asking the server + * if it allows a request). + * + * @param headers the headers to be added to the preflight 'Access-Control-Allow-Headers' response header. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowedRequestHeaders(final String... headers) { + requestHeaders.addAll(Arrays.asList(headers)); + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * @param name the name of the HTTP header. + * @param values the values for the HTTP header. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Object... values) { + if (values.length == 1) { + preflightHeaders.put(name, new ConstantValueGenerator(values[0])); + } else { + preflightResponseHeader(name, Arrays.asList(values)); + } + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * @param name the name of the HTTP header. + * @param value the values for the HTTP header. + * @param the type of values that the Iterable contains. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Iterable value) { + preflightHeaders.put(name, new ConstantValueGenerator(value)); + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * Some values must be dynamically created when the HTTP response is created, for + * example the 'Date' response header. This can be accomplished by using a Callable + * which will have its 'call' method invoked when the HTTP response is created. + * + * @param name the name of the HTTP header. + * @param valueGenerator a Callable which will be invoked at HTTP response creation. + * @param the type of the value that the Callable can return. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Callable valueGenerator) { + preflightHeaders.put(name, valueGenerator); + return this; + } + + /** + * Specifies that no preflight response headers should be added to a preflight response. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder noPreflightResponseHeaders() { + noPreflightHeaders = true; + return this; + } + + /** + * Specifies that a CORS request should be rejected if it is invalid, before any + * further processing. + * + * CORS headers are set after a request is processed. 
This may not always be desired, + * and this setting will check that the Origin is valid; if it is not valid, no + * further processing will take place and an error will be returned to the calling client. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder shortCircuit() { + shortCircuit = true; + return this; + } + + /** + * Builds a {@link NioCorsConfig} with settings specified by previous method calls. + * + * @return {@link NioCorsConfig} the configured CorsConfig instance. + */ + public NioCorsConfig build() { + if (preflightHeaders.isEmpty() && !noPreflightHeaders) { + preflightHeaders.put("date", DateValueGenerator.INSTANCE); + preflightHeaders.put("content-length", new ConstantValueGenerator("0")); + } + return new NioCorsConfig(this); + } + + /** + * This class is used for preflight HTTP response values that do not need to be + * generated, but instead the value is "static" in that the same value will be returned + * for each call. + */ + private static final class ConstantValueGenerator implements Callable { + + private final Object value; + + /** + * Sole constructor. + * + * @param value the value that will be returned when the call method is invoked. + */ + private ConstantValueGenerator(final Object value) { + if (value == null) { + throw new IllegalArgumentException("value must not be null"); + } + this.value = value; + } + + @Override + public Object call() { + return value; + } + } + + /** + * This callable is used for the DATE preflight HTTP response HTTP header. + * Its value must be generated when the response is generated, and hence will be + * different for every call. + */ + private static final class DateValueGenerator implements Callable { + + static final DateValueGenerator INSTANCE = new DateValueGenerator(); + + @Override + public Date call() throws Exception { + return new Date(); + } + } + +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java new file mode 100644 index 0000000000000..6358510703779 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.elasticsearch.common.Strings; + +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Handles Cross Origin Resource Sharing (CORS) requests. + *
+ * This handler can be configured using a {@link NioCorsConfig}; please + * refer to that class for details about the configuration options available. + * + * This code was borrowed from Netty 4 and refactored to work with Elasticsearch's NIO-based HTTP transport. + */ +public class NioCorsHandler extends ChannelDuplexHandler { + + public static final String ANY_ORIGIN = "*"; + private static Pattern SCHEME_PATTERN = Pattern.compile("^https?://"); + + private final NioCorsConfig config; + private HttpRequest request; + + /** + * Creates a new instance with the specified {@link NioCorsConfig}. + */ + public NioCorsHandler(final NioCorsConfig config) { + if (config == null) { + throw new NullPointerException(); + } + this.config = config; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (config.isCorsSupportEnabled() && msg instanceof HttpRequest) { + request = (HttpRequest) msg; + if (isPreflightRequest(request)) { + handlePreflight(ctx, request); + return; + } + if (config.isShortCircuit() && !validateOrigin()) { + forbidden(ctx, request); + return; + } + } + ctx.fireChannelRead(msg); + } + + public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, NioCorsConfig config) { + if (!config.isCorsSupportEnabled()) { + return; + } + String originHeader = request.headers().get(HttpHeaderNames.ORIGIN); + if (!Strings.isNullOrEmpty(originHeader)) { + final String originHeaderVal; + if (config.isAnyOriginSupported()) { + originHeaderVal = ANY_ORIGIN; + } else if (config.isOriginAllowed(originHeader) || isSameOrigin(originHeader, request.headers().get(HttpHeaderNames.HOST))) { + originHeaderVal = originHeader; + } else { + originHeaderVal = null; + } + if (originHeaderVal != null) { + resp.headers().add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, originHeaderVal); + } + } + if (config.isCredentialsAllowed()) { + resp.headers().add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); + } + } + + private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) { + final HttpResponse response = new DefaultFullHttpResponse(request.protocolVersion(), HttpResponseStatus.OK, true, true); + if (setOrigin(response)) { + setAllowMethods(response); + setAllowHeaders(response); + setAllowCredentials(response); + setMaxAge(response); + setPreflightHeaders(response); + ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); + } else { + forbidden(ctx, request); + } + } + + private static void forbidden(final ChannelHandlerContext ctx, final HttpRequest request) { + ctx.writeAndFlush(new DefaultFullHttpResponse(request.protocolVersion(), HttpResponseStatus.FORBIDDEN)) + .addListener(ChannelFutureListener.CLOSE); + } + + private static boolean isSameOrigin(final String origin, final String host) { + if (Strings.isNullOrEmpty(host) == false) { + // strip protocol from origin + final String originDomain = SCHEME_PATTERN.matcher(origin).replaceFirst(""); + if (host.equals(originDomain)) { + return true; + } + } + return false; + } + + /** + * This is a non-CORS specification feature which enables the setting of preflight + * response headers that might be required by intermediaries. + * + * @param response the HttpResponse to which the preflight response headers should be added. 
+ */ + private void setPreflightHeaders(final HttpResponse response) { + response.headers().add(config.preflightResponseHeaders()); + } + + private boolean setOrigin(final HttpResponse response) { + final String origin = request.headers().get(HttpHeaderNames.ORIGIN); + if (!Strings.isNullOrEmpty(origin)) { + if ("null".equals(origin) && config.isNullOriginAllowed()) { + setAnyOrigin(response); + return true; + } + + if (config.isAnyOriginSupported()) { + if (config.isCredentialsAllowed()) { + echoRequestOrigin(response); + setVaryHeader(response); + } else { + setAnyOrigin(response); + } + return true; + } + if (config.isOriginAllowed(origin)) { + setOrigin(response, origin); + setVaryHeader(response); + return true; + } + } + return false; + } + + private boolean validateOrigin() { + if (config.isAnyOriginSupported()) { + return true; + } + + final String origin = request.headers().get(HttpHeaderNames.ORIGIN); + if (Strings.isNullOrEmpty(origin)) { + // No Origin header is present, so this is not a CORS request and there is nothing to validate. + return true; + } + + if ("null".equals(origin) && config.isNullOriginAllowed()) { + return true; + } + + // if the origin is the same as the host of the request, then allow + if (isSameOrigin(origin, request.headers().get(HttpHeaderNames.HOST))) { + return true; + } + + return config.isOriginAllowed(origin); + } + + private void echoRequestOrigin(final HttpResponse response) { + setOrigin(response, request.headers().get(HttpHeaderNames.ORIGIN)); + } + + private static void setVaryHeader(final HttpResponse response) { + response.headers().set(HttpHeaderNames.VARY, HttpHeaderNames.ORIGIN); + } + + private static void setAnyOrigin(final HttpResponse response) { + setOrigin(response, ANY_ORIGIN); + } + + private static void setOrigin(final HttpResponse response, final String origin) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, origin); + } + + private void setAllowCredentials(final HttpResponse response) { + if (config.isCredentialsAllowed() + && !response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN).equals(ANY_ORIGIN)) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); + } + } + + private static boolean isPreflightRequest(final HttpRequest request) { + final HttpHeaders headers = request.headers(); + return request.method().equals(HttpMethod.OPTIONS) && + headers.contains(HttpHeaderNames.ORIGIN) && + headers.contains(HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD); + } + + private void setAllowMethods(final HttpResponse response) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS, config.allowedRequestMethods().stream() + .map(m -> m.name().trim()) + .collect(Collectors.toList())); + } + + private void setAllowHeaders(final HttpResponse response) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders()); + } + + private void setMaxAge(final HttpResponse response) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_MAX_AGE, config.maxAge()); + } + +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index cc8eeb77cc2f6..56cbab5295a74 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -39,6 +39,8 @@ import 
org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; @@ -95,7 +97,8 @@ public void setMocks() { SETTING_PIPELINING_MAX_EVENTS.getDefault(settings)); ThreadContext threadContext = new ThreadContext(settings); nioSocketChannel = mock(NioSocketChannel.class); - handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext); + handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, + NioCorsConfigBuilder.forAnyOrigin().build(), threadContext); } public void testSuccessfulDecodeHttpRequest() throws IOException { diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java new file mode 100644 index 0000000000000..5fa0a7ae0a679 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java @@ -0,0 +1,349 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.function.BiConsumer; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class NioHttpChannelTests extends ESTestCase { + + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private NioSocketChannel nioChannel; + private SocketChannelContext channelContext; + + @Before + public void setup() throws Exception { + nioChannel = mock(NioSocketChannel.class); + channelContext = mock(SocketChannelContext.class); + when(nioChannel.getContext()).thenReturn(channelContext); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + @After + public void shutdown() throws Exception { + if (threadPool != 
null) { + threadPool.shutdownNow(); + } + } + + public void testResponse() { + final FullHttpResponse response = executeRequest(Settings.EMPTY, "request-host"); + assertThat(response.content(), equalTo(ByteBufUtils.toByteBuf(new TestResponse().content()))); + } + + public void testCorsEnabledWithoutAllowOrigins() { + // Set up an HTTP transport with only the CORS enabled setting + Settings settings = Settings.builder() + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, "remote-host", "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); + } + + public void testCorsEnabledWithAllowOrigins() { + final String originValue = "remote-host"; + // create an HTTP transport with CORS enabled and allow origin configured + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testCorsAllowOriginWithSameHost() { + String originValue = "remote-host"; + String host = "remote-host"; + // create an HTTP transport with CORS enabled + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, host); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = "http://" + originValue; + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue + ":5555"; + host = host + ":5555"; + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue.replace("http", "https"); + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testThatStringLiteralWorksOnMatch() { + final String originValue = "remote-host"; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate +
assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); + } + + public void testThatAnyOriginWorks() { + final String originValue = NioCorsHandler.ANY_ORIGIN; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); + } + + public void testHeadersSet() { + Settings settings = Settings.builder().build(); + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + // send a response + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, corsConfig, + threadPool.getThreadContext()); + TestResponse resp = new TestResponse(); + final String customHeader = "custom-header"; + final String customHeaderValue = "xyz"; + resp.addHeader(customHeader, customHeaderValue); + channel.sendResponse(resp); + + // inspect what was written + ArgumentCaptor<Object> responseCaptor = ArgumentCaptor.forClass(Object.class); + verify(channelContext).sendMessage(responseCaptor.capture(), any()); + Object nioResponse = responseCaptor.getValue(); + HttpResponse response = ((NioHttpResponse) nioResponse).getResponse(); + assertThat(response.headers().get("non-existent-header"), nullValue()); + assertThat(response.headers().get(customHeader), equalTo(customHeaderValue)); + assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length()))); + assertThat(response.headers().get(HttpHeaderNames.CONTENT_TYPE), equalTo(resp.contentType())); + } + + @SuppressWarnings("unchecked") + public void testReleaseInListener() throws IOException { + final Settings settings = Settings.builder().build(); + final NamedXContentRegistry registry = xContentRegistry(); + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + final NioHttpRequest request = new NioHttpRequest(registry, httpRequest); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, + corsConfig, threadPool.getThreadContext()); + final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, + JsonXContent.contentBuilder().startObject().endObject()); + assertThat(response.content(),
not(instanceOf(Releasable.class))); + + // ensure we have reserved bytes + if (randomBoolean()) { + BytesStreamOutput out = channel.bytesOutput(); + assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); + } else { + try (XContentBuilder builder = channel.newBuilder()) { + // do something with the builder + builder.startObject().endObject(); + } + } + + channel.sendResponse(response); + Class<BiConsumer<Void, Exception>> listenerClass = (Class<BiConsumer<Void, Exception>>) (Class) BiConsumer.class; + ArgumentCaptor<BiConsumer<Void, Exception>> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(channelContext).sendMessage(any(), listenerCaptor.capture()); + BiConsumer<Void, Exception> listener = listenerCaptor.getValue(); + if (randomBoolean()) { + listener.accept(null, null); + } else { + listener.accept(null, new ClosedChannelException()); + } + // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released + } + + + @SuppressWarnings("unchecked") + public void testConnectionClose() throws Exception { + final Settings settings = Settings.builder().build(); + final FullHttpRequest httpRequest; + final boolean close = randomBoolean(); + if (randomBoolean()) { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); + } + } else { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/"); + if (!close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE); + } + } + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, + corsConfig, threadPool.getThreadContext()); + final TestResponse resp = new TestResponse(); + channel.sendResponse(resp); + Class<BiConsumer<Void, Exception>> listenerClass = (Class<BiConsumer<Void, Exception>>) (Class) BiConsumer.class; + ArgumentCaptor<BiConsumer<Void, Exception>> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(channelContext).sendMessage(any(), listenerCaptor.capture()); + BiConsumer<Void, Exception> listener = listenerCaptor.getValue(); + listener.accept(null, null); + if (close) { + verify(nioChannel, times(1)).close(); + } else { + verify(nioChannel, times(0)).close(); + } + } + + private FullHttpResponse executeRequest(final Settings settings, final String host) { + return executeRequest(settings, null, host); + } + + private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) { + // construct request and send it over the transport layer + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (originValue != null) { + httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); + } + httpRequest.headers().add(HttpHeaderNames.HOST, host); + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + + HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, httpHandlingSettings, corsConfig, + threadPool.getThreadContext()); + channel.sendResponse(new TestResponse()); + + // get the response + ArgumentCaptor<Object> responseCaptor = ArgumentCaptor.forClass(Object.class); +
verify(channelContext, atLeastOnce()).sendMessage(responseCaptor.capture(), any()); + return ((NioHttpResponse) responseCaptor.getValue()).getResponse(); + } + + private static class TestResponse extends RestResponse { + + private final BytesReference reference; + + TestResponse() { + reference = ByteBufUtils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); + } + + @Override + public String contentType() { + return "text"; + } + + @Override + public BytesReference content() { + return reference; + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 4741bd69a527a..c43fc7d072360 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -32,6 +32,7 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -45,6 +46,7 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -58,9 +60,19 @@ import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; - +import java.util.stream.Collectors; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.hamcrest.Matchers.containsString; @@ -94,36 +106,36 @@ public void shutdown() throws Exception { bigArrays = null; } -// public void testCorsConfig() { -// final Set<String> methods = new HashSet<>(Arrays.asList("get", "options", "post")); -// final Set<String> headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); -// final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements -// final Settings settings = Settings.builder() -// .put(SETTING_CORS_ENABLED.getKey(), true) -// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") -// .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", prefix, "")) -// .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", prefix, "")) -// .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) -// .build(); -// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); -// assertTrue(corsConfig.isAnyOriginSupported()); -// assertEquals(headers, corsConfig.allowedRequestHeaders()); -// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); -// } + public void testCorsConfig() { + final Set<String> methods = new HashSet<>(Arrays.asList("get", "options", "post")); + final Set<String> headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); + final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements + final Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") + .put(SETTING_CORS_ALLOW_METHODS.getKey(), Strings.collectionToDelimitedString(methods, ",", prefix, "")) + .put(SETTING_CORS_ALLOW_HEADERS.getKey(), Strings.collectionToDelimitedString(headers, ",", prefix, "")) + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + final NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + assertTrue(corsConfig.isAnyOriginSupported()); + assertEquals(headers, corsConfig.allowedRequestHeaders()); + assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); + } -// public void testCorsConfigWithDefaults() { -// final Set<String> methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY)); -// final Set<String> headers = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY)); -// final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY); -// final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build(); -// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); -// assertFalse(corsConfig.isAnyOriginSupported()); -// assertEquals(Collections.emptySet(), corsConfig.origins().get()); -// assertEquals(headers, corsConfig.allowedRequestHeaders()); -// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); -// assertEquals(maxAge, corsConfig.maxAge()); -// assertFalse(corsConfig.isCredentialsAllowed()); -// } + public void testCorsConfigWithDefaults() { + final Set<String> methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY)); + final Set<String> headers = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY)); + final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY); + final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build(); + final NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + assertFalse(corsConfig.isAnyOriginSupported()); + assertEquals(Collections.emptySet(), corsConfig.origins().get()); + assertEquals(headers, corsConfig.allowedRequestHeaders()); +
assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); + assertEquals(maxAge, corsConfig.maxAge()); + assertFalse(corsConfig.isCredentialsAllowed()); + } /** * Test that {@link NioHttpServerTransport} supports the "Expect: 100-continue" HTTP header diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..db3885eb62fab --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +8cd761f40c4a89ed977167f0518d12e409eaf3d8 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-analyzers-common-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 82585bb7ff3b3..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0f75703c30756c31f7d09ec79191dab6fb35c958 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..bd8711a4d53d9 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +8c93ed67599d345b9359586248ab92342d7d3033 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-backward-codecs-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 981e6d1a1730f..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5c519fdea65726612f79e3dd942b7316966646e \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..36bf03bbbdb54 --- /dev/null +++ b/server/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +003ed080e5184661e606091cd321c229798b22f8 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index cea13d14fe1c7..0000000000000 --- a/server/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f345b6aa3c550dafc63de3e5a5c404691e782336 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..0f940ee9c7ac7 --- /dev/null +++ b/server/licenses/lucene-grouping-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +0b4be9f96edfd3dbcff5aa9b3f0914e86eb9cc51 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-grouping-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index fcb173608efb8..0000000000000 --- a/server/licenses/lucene-grouping-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7a74855e37124a27af36390c9d15abe33614129e \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 
b/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..fdc9336fb2ce2 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +a5dcceb5bc017cee6ab5d3ee1943aca1ac6fe074 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-highlighter-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 79addefbfc615..0000000000000 --- a/server/licenses/lucene-highlighter-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0e3df4b469465ef101254fdcbb08ebd8a19f1f9d \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..62726ca415a48 --- /dev/null +++ b/server/licenses/lucene-join-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +b59e7441f121da969bef8eef2c0c61743b4230a8 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-join-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index a9838db7caae4..0000000000000 --- a/server/licenses/lucene-join-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05d236149c99c860e6b627a8f78ea32918c108c3 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..a68093d2fc42e --- /dev/null +++ b/server/licenses/lucene-memory-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +46736dbb07b432f0a7c1b3080f62932c483e5cb9 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-memory-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 679c79788f500..0000000000000 --- a/server/licenses/lucene-memory-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d83e7e65eb268425f7bd5be2425d4a00b556bc47 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..23e2b68f3dfcf --- /dev/null +++ b/server/licenses/lucene-misc-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +ee203718d525da0c6258a51a5a32d877089fe5af \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-misc-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index c403d4c4f86b7..0000000000000 --- a/server/licenses/lucene-misc-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -440a998b5bf99871bec4272a219de01b25751d5c \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..5bac053813ea2 --- /dev/null +++ b/server/licenses/lucene-queries-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +cf17a332d8e42a45e8f013d5df408f4391d2620a \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-queries-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 6b8897d1ae7b7..0000000000000 --- a/server/licenses/lucene-queries-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -2a5c031155fdfa743af321150c0dd654a6ea3c71 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..471aa797028a7 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +04832303d70502d2ece44501cb1716f42e24fe35 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-queryparser-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index b6c6bf766101d..0000000000000 --- a/server/licenses/lucene-queryparser-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d021c9a461ff0f020d038ad5ecc5127973d4674a \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..486dafc10c73f --- /dev/null +++ b/server/licenses/lucene-sandbox-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +639313e3a9573779b6a28b45a7f57fc1f73ffa46 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-sandbox-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 92f64fca2c749..0000000000000 --- a/server/licenses/lucene-sandbox-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9877a14c53e69b39fff2bf10d49a61171746d940 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..0a083b5a078ac --- /dev/null +++ b/server/licenses/lucene-spatial-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +6144b493ba3588a638858d0058054758acc619b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-spatial-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 2f691988c4495..0000000000000 --- a/server/licenses/lucene-spatial-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7d7e5101b46a120efa311509948c0d1f9bf30155 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..851b0d76d3e7a --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +9d00c6b8bbbbb496aecd555406267fee9e0af914 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-spatial-extras-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 86c147f961020..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5a4c11db96ae70b9048243cc530fcbc76faa0978 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..22ce3c7244338 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +159cdb6d36845690cb1972d02cc0b472bb14b7f3 \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial3d-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-spatial3d-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 2fbdcdecf1a08..0000000000000 --- a/server/licenses/lucene-spatial3d-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afb01af1450067b145ca2c1d737b5907288af560 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 new file mode 100644 index 0000000000000..0724381bcc6a6 --- /dev/null +++ b/server/licenses/lucene-suggest-7.4.0-snapshot-0a7c3f462f.jar.sha1 @@ -0,0 +1 @@ +af1dd0218d58990cca5c1592d9722e67d233c996 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/server/licenses/lucene-suggest-7.4.0-snapshot-1cbadda4d3.jar.sha1 deleted file mode 100644 index 1a86525735c05..0000000000000 --- a/server/licenses/lucene-suggest-7.4.0-snapshot-1cbadda4d3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -473f0221e0b2ea45940d8ae6dcf16e39c81b18c2 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 792f2135e78f0..3c35b97702429 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -63,7 +63,7 @@ public RoutingExplanations getExplanations() { @Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); state = ClusterState.readFrom(in, null); explanations = RoutingExplanations.readFrom(in); @@ -76,7 +76,7 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); state.writeTo(out); RoutingExplanations.writeTo(explanations, out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 2691d5b5b09d8..cc29e60aa996f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -68,7 +68,7 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { @Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); @@ -89,7 +89,7 @@ public Settings getPersistentSettings() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 1342e62c65250..2d699591192f1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -115,7 +115,7 @@ public boolean isRolledOver() { @Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); oldIndex = in.readString(); newIndex = in.readString(); @@ -144,7 +144,7 @@ public void readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); out.writeString(oldIndex); out.writeString(newIndex); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 6b9992e7e4c3a..7e3a385443f84 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -74,6 +74,29 @@ public interface BlobContainer { */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException; + /** + * Reads blob content from the input stream and writes it to the container in a new blob with the given name, + * using an atomic write operation if the implementation supports it. When the BlobContainer implementation + * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then + * the {@link #writeBlob(String, InputStream, long)} method is used. + * + * This method assumes the container does not already contain a blob of the same blobName. If a blob by the + * same name already exists, the operation will fail and an {@link IOException} will be thrown. + * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param blobSize + * The size of the blob to be written, in bytes. It is implementation dependent whether + * this value is used in writing the blob to the repository. + * @throws FileAlreadyExistsException if a blob by the same name already exists + * @throws IOException if the input stream could not be read, or the target blob could not be written to. + */ + default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + writeBlob(blobName, inputStream, blobSize); + } + /** * Deletes the blob with the given name, if the blob exists. If the blob does not exist, * this method throws a NoSuchFileException.
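The default method above gives every BlobContainer an atomic-write entry point, falling back to a plain writeBlob when the implementation does not override it. A minimal caller sketch, assuming only the interface shown above (the helper class, the container argument, and the "meta.dat" blob name are hypothetical, purely for illustration):

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    import org.elasticsearch.common.blobstore.BlobContainer;

    class WriteBlobAtomicSketch {
        // Write a small payload atomically; per the Javadoc above this fails with
        // a FileAlreadyExistsException if a blob named "meta.dat" already exists.
        static void writeMetadata(BlobContainer container) throws IOException {
            final byte[] payload = "{\"version\":1}".getBytes(StandardCharsets.UTF_8);
            try (InputStream stream = new ByteArrayInputStream(payload)) {
                container.writeBlobAtomic("meta.dat", stream, payload.length);
            }
        }
    }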
diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index a9600681d1605..6f1df0011b147 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -19,11 +19,12 @@ package org.elasticsearch.common.blobstore.fs; -import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.Streams; import java.io.BufferedInputStream; @@ -56,8 +57,9 @@ */ public class FsBlobContainer extends AbstractBlobContainer { - protected final FsBlobStore blobStore; + private static final String TEMP_FILE_PREFIX = "pending-"; + protected final FsBlobStore blobStore; protected final Path path; public FsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) { @@ -131,6 +133,48 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t IOUtils.fsync(path, true); } + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + final String tempBlob = tempBlobName(blobName); + final Path tempBlobPath = path.resolve(tempBlob); + try { + try (OutputStream outputStream = Files.newOutputStream(tempBlobPath, StandardOpenOption.CREATE_NEW)) { + Streams.copy(inputStream, outputStream); + } + IOUtils.fsync(tempBlobPath, false); + + final Path blobPath = path.resolve(blobName); + // If the target file exists then Files.move() behaviour is implementation specific: + // the existing file might be replaced, or the method might fail by throwing an IOException. + if (Files.exists(blobPath)) { + throw new FileAlreadyExistsException("blob [" + blobPath + "] already exists, cannot overwrite"); + } + Files.move(tempBlobPath, blobPath, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException ex) { + try { + deleteBlobIgnoringIfNotExists(tempBlob); + } catch (IOException e) { + ex.addSuppressed(e); + } + throw ex; + } finally { + IOUtils.fsync(path, true); + } + } + + public static String tempBlobName(final String blobName) { + return TEMP_FILE_PREFIX + blobName + "-" + UUIDs.randomBase64UUID(); + } + + /** + * Returns true if the blob is a leftover temporary blob. + * + * Temporary blobs might be left behind after a failed atomic write operation.
+ */ + public static boolean isTempBlobName(final String blobName) { + return blobName.startsWith(TEMP_FILE_PREFIX); + } + @Override public void move(String source, String target) throws IOException { Path sourcePath = path.resolve(source); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 25138a2909606..5fe10d8fc684f 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -44,7 +44,6 @@ import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; @@ -145,21 +144,11 @@ public static Iterable files(SegmentInfos infos) throws IOException { public static int getNumDocs(SegmentInfos info) { int numDocs = 0; for (SegmentCommitInfo si : info) { - numDocs += si.info.maxDoc() - si.getDelCount(); + numDocs += si.info.maxDoc() - si.getDelCount() - si.getSoftDelCount(); } return numDocs; } - /** - * Unlike {@link #getNumDocs(SegmentInfos)} this method returns a numDocs that always excludes soft-deleted docs. - * This method is expensive thus prefer using {@link #getNumDocs(SegmentInfos)} unless an exact numDocs is required. - */ - public static int getExactNumDocs(IndexCommit commit) throws IOException { - try (DirectoryReader reader = DirectoryReader.open(commit)) { - return new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETE_FIELD).numDocs(); - } - } - /** * Reads the segments infos from the given commit, failing if it fails to load */ @@ -212,6 +201,7 @@ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Direc } final CommitPoint cp = new CommitPoint(si, directory); try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setIndexCommit(cp) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) @@ -235,6 +225,7 @@ public static void cleanLuceneIndex(Directory directory) throws IOException { } } try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setMergePolicy(NoMergePolicy.INSTANCE) // no merges .setCommitOnClose(false) // no commits .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append... 
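The FsBlobContainer.writeBlobAtomic implementation above follows the standard write-to-a-temporary-file-then-rename recipe. For readers outside this codebase, a standalone sketch of the same pattern in plain java.nio (hypothetical paths, no Elasticsearch types, and assuming the filesystem supports atomic moves) could look like this:

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.FileAlreadyExistsException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    class AtomicWriteSketch {
        // Stream the payload into "pending-<name>", then atomically move it into
        // place, so concurrent readers never observe a partially written blob.
        static void writeAtomic(Path dir, String name, InputStream in) throws IOException {
            final Path temp = dir.resolve("pending-" + name);
            try {
                Files.copy(in, temp);
                final Path target = dir.resolve(name);
                // Files.move() behaviour is implementation specific when the target
                // already exists, so fail explicitly instead of overwriting.
                if (Files.exists(target)) {
                    throw new FileAlreadyExistsException(target.toString());
                }
                Files.move(temp, target, StandardCopyOption.ATOMIC_MOVE);
            } catch (IOException ex) {
                Files.deleteIfExists(temp); // best-effort cleanup of the temporary file
                throw ex;
            }
        }
    }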
diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index df038e8303edb..7559b058ea770 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -19,6 +19,19 @@ package org.elasticsearch.http; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; + public class HttpHandlingSettings { private final int maxContentLength; @@ -30,6 +43,7 @@ public class HttpHandlingSettings { private final int compressionLevel; private final boolean detailedErrorsEnabled; private final int pipeliningMaxEvents; + private boolean corsEnabled; public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, @@ -45,6 +59,18 @@ public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeade this.pipeliningMaxEvents = pipeliningMaxEvents; } + public static HttpHandlingSettings fromSettings(Settings settings) { + return new HttpHandlingSettings(Math.toIntExact(SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_CHUNK_SIZE.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings).getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + SETTING_PIPELINING_MAX_EVENTS.get(settings)); + } + public int getMaxContentLength() { return maxContentLength; } @@ -80,4 +106,8 @@ public boolean getDetailedErrorsEnabled() { public int getPipeliningMaxEvents() { return pipeliningMaxEvents; } + + public boolean isCorsEnabled() { + return corsEnabled; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 8fbbe3a9deaa9..21025046b8c57 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -39,13 +39,13 @@ public final class CommitStats implements Streamable, ToXContentFragment { private String id; // lucene commit id in base 64; private int numDocs; - public CommitStats(SegmentInfos segmentInfos, int numDocs) { + public CommitStats(SegmentInfos segmentInfos) { // clone the map to protect against concurrent 
changes userData = MapBuilder.newMapBuilder().putAll(segmentInfos.getUserData()).immutableMap(); // lucene calls the current generation, last generation. generation = segmentInfos.getLastGeneration(); id = Base64.getEncoder().encodeToString(segmentInfos.getId()); - this.numDocs = numDocs; + numDocs = Lucene.getNumDocs(segmentInfos); } private CommitStats() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index d0aaab1dbc5a0..874f03ae434de 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -632,9 +632,7 @@ protected final void ensureOpen() { /** get commits stats for the last commit */ public CommitStats commitStats() { - try (Engine.Searcher searcher = acquireSearcher("commit_stats", Engine.SearcherScope.INTERNAL)) { - return new CommitStats(getLastCommittedSegmentInfos(), searcher.reader().numDocs()); - } + return new CommitStats(getLastCommittedSegmentInfos()); } /** diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java index cbe1721f07f71..40c8277d3991a 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -56,13 +56,13 @@ final class TranslogLeafReader extends LeafReader { private final Translog.Index operation; private static final FieldInfo FAKE_SOURCE_FIELD = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0,0); + 0, 0, false); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0,0); + 0, 0, false); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0,0); + 0, 0, false); private final Version indexVersionCreated; TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 1838b60050e4c..e19bdb6708370 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -40,6 +40,8 @@ import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.DocValueFormat; +import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.Base64; @@ -104,6 +106,10 @@ public String typeName() { return CONTENT_TYPE; } + @Override + public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + return DocValueFormat.BINARY; + } @Override public BytesReference valueForDisplay(Object value) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 54718c545a44e..0a03e8601b42d 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ 
b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -156,6 +156,7 @@ void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory ta final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we started it up, otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index c76169f40b43e..68b672de1ee25 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -864,7 +864,7 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg Map<String, String> commitUserDataBuilder = new HashMap<>(); try { final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory); - numDocs = Lucene.getExactNumDocs(commit != null ? commit : findIndexCommit(directory, segmentCommitInfos)); + numDocs = Lucene.getNumDocs(segmentCommitInfos); commitUserDataBuilder.putAll(segmentCommitInfos.getUserData()); Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version. for (SegmentCommitInfo info : segmentCommitInfos) { @@ -947,16 +947,6 @@ public static void hashFile(BytesRefBuilder fileHash, InputStream in, long size) assert fileHash.length() == len : Integer.toString(fileHash.length()) + " != " + Integer.toString(len); } - private static IndexCommit findIndexCommit(Directory directory, SegmentInfos sis) throws IOException { - List<IndexCommit> commits = DirectoryReader.listCommits(directory); - for (IndexCommit commit : commits) { - if (commit.getSegmentsFileName().equals(sis.getSegmentsFileName())) { - return commit; - } - } - throw new IOException("Index commit [" + sis.getSegmentsFileName() + "] is not found"); - } - @Override public Iterator<StoreFileMetaData> iterator() { return metadata.values().iterator(); @@ -1604,6 +1594,7 @@ private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openM throws IOException { assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setCommitOnClose(false) .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index b8bd93e05a6f8..56a084196131f 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -33,6 +33,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.OutputStreamDataOutput; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.EnvironmentAwareCommand; @@ -179,6 +180,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment
env) th terminal.println("Marking index with the new history uuid"); // commit the new history id IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setCommitOnClose(false) // we don't want merges to happen here - we call maybe merge on the engine // later once we started it up, otherwise we would need to wait for it here diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index dfe804cb1442b..63210807deebb 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.flush; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.index.SegmentInfos; import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -42,13 +41,13 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; @@ -468,19 +467,15 @@ public String executor() { } } - private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) throws IOException { + private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); logger.trace("{} performing pre sync flush", request.shardId()); indexShard.flush(flushRequest); - try (Engine.IndexCommitRef commitRef = indexShard.acquireLastIndexCommit(false)) { - final SegmentInfos segmentInfos = Lucene.readSegmentInfos(commitRef.getIndexCommit()); - final int numDocs = Lucene.getExactNumDocs(commitRef.getIndexCommit()); - final Engine.CommitId commitId = new Engine.CommitId(segmentInfos.getId()); - final String syncId = segmentInfos.userData.get(Engine.SYNC_COMMIT_ID); - logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, numDocs); - return new PreSyncedFlushResponse(commitId, numDocs, syncId); - } + final CommitStats commitStats = indexShard.commitStats(); + final Engine.CommitId commitId = commitStats.getRawCommitId(); + logger.trace("{} pre sync flush done.
commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs()); + return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId()); } private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { @@ -507,18 +502,7 @@ private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { if (indexShard.routingEntry().primary() == false) { throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); } - if (Assertions.ENABLED) { - if (logger.isTraceEnabled()) { - logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations()); - } - } int opCount = indexShard.getActiveOperationsCount(); - // Need to snapshot the debug info twice as it's updated concurrently with the permit count. - if (Assertions.ENABLED) { - if (logger.isTraceEnabled()) { - logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations()); - } - } return new InFlightOpsResponse(opCount); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 3e09312bec86f..cb49eed25f8fe 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -289,7 +289,7 @@ public RecoveryResponse newInstance() { * @param recoveryTarget the target of the recovery * @return a snapshot of the store metadata */ - static Store.MetadataSnapshot getStoreMetadataSnapshot(final Logger logger, final RecoveryTarget recoveryTarget) { + private Store.MetadataSnapshot getStoreMetadataSnapshot(final RecoveryTarget recoveryTarget) { try { return recoveryTarget.indexShard().snapshotStoreMetadata(); } catch (final org.apache.lucene.index.IndexNotFoundException e) { @@ -312,7 +312,7 @@ private StartRecoveryRequest getStartRecoveryRequest(final RecoveryTarget recove final StartRecoveryRequest request; logger.trace("{} collecting local files for [{}]", recoveryTarget.shardId(), recoveryTarget.sourceNode()); - final Store.MetadataSnapshot metadataSnapshot = getStoreMetadataSnapshot(logger, recoveryTarget); + final Store.MetadataSnapshot metadataSnapshot = getStoreMetadataSnapshot(recoveryTarget); logger.trace("{} local file count [{}]", recoveryTarget.shardId(), metadataSnapshot.size()); final long startingSeqNo; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index cc5cfcccf3beb..b01a1363c1cf6 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -50,6 +50,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -555,10 +556,8 @@ public String startVerification() { String blobName = "master.dat"; BytesArray bytes = new BytesArray(testBytes); try (InputStream stream = 
bytes.streamInput()) { - testContainer.writeBlob(blobName + "-temp", stream, bytes.length()); + testContainer.writeBlobAtomic(blobName, stream, bytes.length()); } - // Make sure that move is supported - testContainer.move(blobName + "-temp", blobName); return seed; } } catch (IOException exp) { @@ -774,18 +773,8 @@ private long listBlobsToGetLatestIndexId() throws IOException { } private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException { - final String tempBlobName = "pending-" + blobName + "-" + UUIDs.randomBase64UUID(); try (InputStream stream = bytesRef.streamInput()) { - snapshotsBlobContainer.writeBlob(tempBlobName, stream, bytesRef.length()); - snapshotsBlobContainer.move(tempBlobName, blobName); - } catch (IOException ex) { - // temporary blob creation or move failed - try cleaning up - try { - snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(tempBlobName); - } catch (IOException e) { - ex.addSuppressed(e); - } - throw ex; + snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length()); } } @@ -955,7 +944,7 @@ protected void finalize(final List snapshots, // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier // attempt to write an index file with this generation failed mid-way after creating the temporary file. for (final String blobName : blobs.keySet()) { - if (indexShardSnapshotsFormat.isTempBlobName(blobName)) { + if (FsBlobContainer.isTempBlobName(blobName)) { try { blobContainer.deleteBlobIgnoringIfNotExists(blobName); } catch (IOException e) { @@ -1436,6 +1425,7 @@ public void restore() throws IOException { // empty shard would cause exceptions to be thrown. Since there is no data to restore from an empty // shard anyway, we just create the empty shard here and then exit. IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setOpenMode(IndexWriterConfig.OpenMode.CREATE) .setCommitOnClose(true)); writer.close(); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 8c8139d5abd6a..df9b41ba87299 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.bytes.BytesArray; @@ -52,8 +53,6 @@ */ public class ChecksumBlobStoreFormat extends BlobStoreFormat { - private static final String TEMP_FILE_PREFIX = "pending-"; - private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; // The format version @@ -120,7 +119,7 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOExcepti } /** - * Writes blob in atomic manner with resolving the blob name using {@link #blobName} and {@link #tempBlobName} methods. + * Writes a blob atomically, resolving the blob name using the {@link #blobName} method. *

* The blob will be compressed and a checksum will be written if required. * @@ -131,20 +130,12 @@ public T readBlob(BlobContainer blobContainer, String blobName) throws IOExcepti * @param name blob name */ public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - String tempBlobName = tempBlobName(name); - writeBlob(obj, blobContainer, tempBlobName); - try { - blobContainer.move(tempBlobName, blobName); - } catch (IOException ex) { - // Move failed - try cleaning up - try { - blobContainer.deleteBlob(tempBlobName); - } catch (Exception e) { - ex.addSuppressed(e); + final String blobName = blobName(name); + writeTo(obj, blobName, bytesArray -> { + try (InputStream stream = bytesArray.streamInput()) { + blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length()); } - throw ex; - } + }); } /** @@ -157,51 +148,35 @@ public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws * @param name blob name */ public void write(T obj, BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - writeBlob(obj, blobContainer, blobName); + final String blobName = blobName(name); + writeTo(obj, blobName, bytesArray -> { + try (InputStream stream = bytesArray.streamInput()) { + blobContainer.writeBlob(blobName, stream, bytesArray.length()); + } + }); } - /** - * Writes blob in atomic manner without resolving the blobName using using {@link #blobName} method. - *

- * The blob will be compressed and checksum will be written if required. - * - * @param obj object to be serialized - * @param blobContainer blob container - * @param blobName blob name - */ - protected void writeBlob(T obj, BlobContainer blobContainer, String blobName) throws IOException { - BytesReference bytes = write(obj); - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { + final BytesReference bytes = write(obj); + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; - try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, byteArrayOutputStream, BUFFER_SIZE)) { + try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { CodecUtil.writeHeader(indexOutput, codec, VERSION); try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { @Override public void close() throws IOException { // this is important since some of the XContentBuilders write bytes on close. // in order to write the footer we need to prevent closing the actual index input. - } }) { + } + }) { bytes.writeTo(indexOutputOutputStream); } CodecUtil.writeFooter(indexOutput); } - BytesArray bytesArray = new BytesArray(byteArrayOutputStream.toByteArray()); - try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length()); - } + consumer.accept(new BytesArray(outputStream.toByteArray())); } } - /** - * Returns true if the blob is a leftover temporary blob. - * - * The temporary blobs might be left after failed atomic write operation. 
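For readers following the refactoring: the write-then-move dance that used to live here, and that `FsBlobContainer#writeBlobAtomic` now presumably centralizes, boils down to the pattern below. This is a hedged sketch with illustrative names and plain `java.nio` calls, not the actual FsBlobContainer code.

[source,java]
----------
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.UUID;

final class AtomicBlobWriteSketch {
    /**
     * Writes the stream to a uniquely named "pending-" temp blob, then atomically
     * renames it into place; on failure the temp blob is deleted on a best-effort
     * basis, mirroring the cleanup the removed code above performed by hand.
     */
    static void writeBlobAtomic(Path containerDir, String blobName, InputStream in) throws IOException {
        final Path tempPath = containerDir.resolve("pending-" + blobName + "-" + UUID.randomUUID());
        try {
            Files.copy(in, tempPath);                           // step 1: write the temp blob
            Files.move(tempPath, containerDir.resolve(blobName),
                StandardCopyOption.ATOMIC_MOVE);                // step 2: atomic rename into place
        } catch (IOException ex) {
            try {
                Files.deleteIfExists(tempPath);                 // best-effort cleanup on failure
            } catch (IOException suppressed) {
                ex.addSuppressed(suppressed);
            }
            throw ex;
        }
    }
}
----------

A crash between the two steps leaves a blob with the recognizable "pending-" prefix behind, which is exactly what the BlobStoreRepository#finalize loop further up sweeps via FsBlobContainer.isTempBlobName.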
- */ - public boolean isTempBlobName(String blobName) { - return blobName.startsWith(ChecksumBlobStoreFormat.TEMP_FILE_PREFIX); - } - protected BytesReference write(T obj) throws IOException { try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { if (compress) { @@ -222,10 +197,4 @@ protected void write(T obj, StreamOutput streamOutput) throws IOException { builder.endObject(); } } - - - protected String tempBlobName(String name) { - return TEMP_FILE_PREFIX + String.format(Locale.ROOT, blobNameFormat, name); - } - } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 8677370fc9927..242e088747341 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -39,6 +39,7 @@ import java.text.NumberFormat; import java.text.ParseException; import java.util.Arrays; +import java.util.Base64; import java.util.Locale; import java.util.Objects; import java.util.function.LongSupplier; @@ -121,6 +122,50 @@ public BytesRef parseBytesRef(String value) { } }; + DocValueFormat BINARY = new DocValueFormat() { + + @Override + public String getWriteableName() { + return "binary"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public Object format(long value) { + throw new UnsupportedOperationException(); + } + + @Override + public Object format(double value) { + throw new UnsupportedOperationException(); + } + + @Override + public String format(BytesRef value) { + return Base64.getEncoder() + .withoutPadding() + .encodeToString(Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length)); + } + + @Override + public long parseLong(String value, boolean roundUp, LongSupplier now) { + throw new UnsupportedOperationException(); + } + + @Override + public double parseDouble(String value, boolean roundUp, LongSupplier now) { + throw new UnsupportedOperationException(); + } + + @Override + public BytesRef parseBytesRef(String value) { + return new BytesRef(Base64.getDecoder().decode(value)); + } + }; + final class DateTime implements DocValueFormat { public static final String NAME = "date_time"; diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 66ea407f42afd..869dfe995ed4e 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -645,6 +645,7 @@ private void registerValueFormats() { registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP); registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW); + registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY); } /** diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java index c15c0a1be7f3d..e8bd14b640dfa 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -102,6 +102,6 @@ 
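A quick note on the `binary` doc-value format registered above: it is plain padding-free Base64, so the two bytes {42, 100} (0x2A 0x64) encode to "KmQ" and decode back losslessly. A minimal standalone sketch of the same round trip (hypothetical class name, same JDK calls as the patch):

[source,java]
----------
import java.util.Base64;

public class BinaryDocValueRoundTrip {
    public static void main(String[] args) {
        final byte[] raw = {42, 100};
        // format(BytesRef): encode without padding, as DocValueFormat.BINARY does above
        final String encoded = Base64.getEncoder().withoutPadding().encodeToString(raw);
        System.out.println(encoded); // prints: KmQ
        // parseBytesRef(String): the JDK decoder accepts unpadded input
        final byte[] decoded = Base64.getDecoder().decode(encoded);
        assert decoded.length == 2 && decoded[0] == 42 && decoded[1] == 100;
    }
}
----------

The DocValueFormatTests additions below pin down exactly this pair of expectations.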
protected ClusterUpdateSettingsResponse createBlankInstance() { public void testOldSerialisation() throws IOException { ClusterUpdateSettingsResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0_alpha1)); + assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index c3ff45118152e..903accac6ab9d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.rollover; +import com.carrotsearch.randomizedtesting.annotations.Repeat; + import org.elasticsearch.Version; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -132,6 +134,6 @@ protected RolloverResponse mutateInstance(RolloverResponse response) { public void testOldSerialisation() throws IOException { RolloverResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0_alpha1)); + assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); } } diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java new file mode 100644 index 0000000000000..c603eda906cae --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.blobstore.fs; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; + +public class FsBlobContainerTests extends ESTestCase { + + public void testTempBlobName() { + final String blobName = randomAlphaOfLengthBetween(1, 20); + final String tempBlobName = FsBlobContainer.tempBlobName(blobName); + assertThat(tempBlobName, startsWith("pending-")); + assertThat(tempBlobName, containsString(blobName)); + } + + public void testIsTempBlobName() { + final String tempBlobName = FsBlobContainer.tempBlobName(randomAlphaOfLengthBetween(1, 20)); + assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java similarity index 75% rename from server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java rename to server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java index b08b81db11aeb..9230cded82b1d 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java @@ -16,23 +16,27 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.common.blobstore; +package org.elasticsearch.common.blobstore.fs; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.io.IOException; -import java.nio.file.Path; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + protected BlobStore newBlobStore() throws IOException { - Path tempDir = createTempDir(); - Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); - return new FsBlobStore(settings, tempDir); + final Settings settings; + if (randomBoolean()) { + settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); + } else { + settings = Settings.EMPTY; + } + return new FsBlobStore(settings, createTempDir()); } } diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java similarity index 84% rename from server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java rename to server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java index 8b9021cae9370..59e4ffd7927ca 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java @@ -16,10 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.blobstore; +package org.elasticsearch.common.blobstore.fs; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -32,10 +34,15 @@ @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class FsBlobStoreTests extends ESBlobStoreTestCase { + protected BlobStore newBlobStore() throws IOException { - Path tempDir = createTempDir(); - Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); - return new FsBlobStore(settings, tempDir); + final Settings settings; + if (randomBoolean()) { + settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); + } else { + settings = Settings.EMPTY; + } + return new FsBlobStore(settings, createTempDir()); } public void testReadOnly() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index c4292410d0ac5..d3119bbc0fdc4 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -23,7 +23,6 @@ import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -108,7 +107,6 @@ import static org.hamcrest.Matchers.notNullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30228") // What if DV is corrupted? 
public class CorruptedFileIT extends ESIntegTestCase { @Override diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 94bd8e80898db..a543e87adcb46 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -46,16 +46,13 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -103,7 +100,7 @@ public void onFailure(Exception e) { } } - public void testSyncedFlush() throws ExecutionException, InterruptedException, IOException { + public void testSyncedFlush() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get(); ensureGreen(); @@ -246,16 +243,6 @@ private void indexDoc(Engine engine, String id) throws IOException { assertThat(indexResult.getFailure(), nullValue()); } - private String syncedFlushDescription(ShardsSyncedFlushResult result) { - String detail = result.shardResponses().entrySet().stream() - .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") - .collect(Collectors.joining(",")); - return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]", - result.totalShards(), result.failed(), result.failureReason(), detail); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392") - @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE") public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -281,7 +268,6 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( @@ -297,8 +283,6 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392") - @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE") public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -315,11 +299,9 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", 
Integer.toString(i)); } final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); // Shards were updated, renew synced flush. @@ -328,7 +310,6 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); // Manually remove or change sync-id, renew synced flush. @@ -344,7 +325,6 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { assertThat(shard.commitStats().syncId(), nullValue()); } final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index 987f69b65878a..8a8d57295a502 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -29,6 +29,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.ESTestCase.assertBusy; /** Utils for SyncedFlush */ public class SyncedFlushUtil { @@ -40,21 +43,31 @@ private SyncedFlushUtil() { /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ - public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) { + public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception { + /* + * When the last indexing operation is completed, we will fire a global checkpoint sync. + * Since a global checkpoint sync request is a replication request, it will acquire an index + * shard permit on the primary when executing. If this happens while we are + * issuing the synced-flush, the synced-flush request will fail as it thinks there are + * in-flight operations. We can avoid such a situation by issuing another synced-flush + * if the previous one failed due to ongoing operations on the primary. 
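 * Concretely, assertBusy below retries only while its body throws an AssertionError,
 * which is why the retryable "ongoing operations on primary" failure is rethrown as
 * one; any other exception propagates to the caller immediately.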
+ */ SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]", - service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state()); - LatchedListener listener = new LatchedListener<>(); - service.attemptSyncedFlush(shardId, listener); - try { + AtomicReference> listenerHolder = new AtomicReference<>(); + assertBusy(() -> { + LatchedListener listener = new LatchedListener<>(); + listenerHolder.set(listener); + service.attemptSyncedFlush(shardId, listener); listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); + if (listener.result != null && listener.result.failureReason() != null + && listener.result.failureReason().contains("ongoing operations on primary")) { + throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry + } + }); + if (listenerHolder.get().error != null) { + throw ExceptionsHelper.convertToElastic(listenerHolder.get().error); } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; + return listenerHolder.get().result; } public static final class LatchedListener implements ActionListener { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 9c4c1c1e736fd..7b1003a862481 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -24,18 +24,16 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.NoMergePolicy; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.translog.Translog; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import static org.hamcrest.Matchers.equalTo; @@ -94,6 +92,7 @@ public void testGetStartingSeqNo() throws Exception { replica.close("test", false); final List commits = DirectoryReader.listCommits(replica.store().directory()); IndexWriterConfig iwc = new IndexWriterConfig(null) + .setSoftDeletesField(Lucene.SOFT_DELETE_FIELD) .setCommitOnClose(false) .setMergePolicy(NoMergePolicy.INSTANCE) .setOpenMode(IndexWriterConfig.OpenMode.APPEND); @@ -111,33 +110,4 @@ public void testGetStartingSeqNo() throws Exception { closeShards(replica); } } - - public void testExactNumDocsInStoreMetadataSnapshot() throws Exception { - final IndexShard replica = newShard(false); - recoveryEmptyReplica(replica); - long flushedDocs = 0; - final int numDocs = scaledRandomIntBetween(1, 20); - final Set docIds = new HashSet<>(); - for (int i = 0; i < numDocs; i++) { - String id = Integer.toString(i); - docIds.add(id); - indexDoc(replica, "_doc", id); - if (randomBoolean()) { - replica.flush(new FlushRequest()); - flushedDocs = docIds.size(); - } - } - for (String id : randomSubsetOf(docIds)) { - deleteDoc(replica, "_doc", id); - docIds.remove(id); - if (randomBoolean()) { - replica.flush(new 
FlushRequest()); - flushedDocs = docIds.size(); - } - } - final RecoveryTarget recoveryTarget = new RecoveryTarget(replica, null, null, null); - assertThat(PeerRecoveryTargetService.getStoreMetadataSnapshot(logger, recoveryTarget).getNumDocs(), equalTo(flushedDocs)); - recoveryTarget.decRef(); - closeShards(replica); - } } diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index e5cfbf98b3db9..0190627947448 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -44,6 +44,7 @@ public void testSerialization() throws Exception { entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY)); NamedWriteableRegistry registry = new NamedWriteableRegistry(entries); BytesStreamOutput out = new BytesStreamOutput(); @@ -82,6 +83,11 @@ public void testSerialization() throws Exception { out.writeNamedWriteable(DocValueFormat.RAW); in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); assertSame(DocValueFormat.RAW, in.readNamedWriteable(DocValueFormat.class)); + + out = new BytesStreamOutput(); + out.writeNamedWriteable(DocValueFormat.BINARY); + in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); + assertSame(DocValueFormat.BINARY, in.readNamedWriteable(DocValueFormat.class)); } public void testRawFormat() { @@ -96,6 +102,14 @@ public void testRawFormat() { assertEquals("abc", DocValueFormat.RAW.format(new BytesRef("abc"))); } + public void testBinaryFormat() { + assertEquals("", DocValueFormat.BINARY.format(new BytesRef())); + assertEquals("KmQ", DocValueFormat.BINARY.format(new BytesRef(new byte[] {42, 100}))); + + assertEquals(new BytesRef(), DocValueFormat.BINARY.parseBytesRef("")); + assertEquals(new BytesRef(new byte[] {42, 100}), DocValueFormat.BINARY.parseBytesRef("KmQ")); + } + public void testBooleanFormat() { assertEquals(false, DocValueFormat.BOOLEAN.format(0)); assertEquals(true, DocValueFormat.BOOLEAN.format(1)); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index a8a2669ef9b4a..ab5387b6e3f48 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.fields; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -700,7 +701,7 @@ public void testSingleValueFieldDatatField() throws ExecutionException, Interrup assertThat(fields.get("test_field").getValue(), equalTo("foobar")); } - public void testFieldsPulledFromFieldData() throws Exception { + public void testDocValueFields() throws Exception { createIndex("test"); String mapping = Strings @@ -744,6 +745,7 @@ public void testFieldsPulledFromFieldData() throws 
Exception { .endObject() .startObject("binary_field") .field("type", "binary") + .field("doc_values", true) // off by default on binary fields .endObject() .startObject("ip_field") .field("type", "ip") @@ -766,6 +768,7 @@ public void testFieldsPulledFromFieldData() throws Exception { .field("double_field", 6.0d) .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(date)) .field("boolean_field", true) + .field("binary_field", new byte[] {42, 100}) .field("ip_field", "::1") .endObject()).execute().actionGet(); @@ -782,6 +785,7 @@ public void testFieldsPulledFromFieldData() throws Exception { .addDocValueField("double_field") .addDocValueField("date_field") .addDocValueField("boolean_field") + .addDocValueField("binary_field") .addDocValueField("ip_field"); SearchResponse searchResponse = builder.execute().actionGet(); @@ -790,7 +794,7 @@ public void testFieldsPulledFromFieldData() throws Exception { Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", - "ip_field"))); + "binary_field", "ip_field"))); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); @@ -802,6 +806,8 @@ public void testFieldsPulledFromFieldData() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), + equalTo(new BytesRef(new byte[] {42, 100}))); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -815,6 +821,7 @@ public void testFieldsPulledFromFieldData() throws Exception { .addDocValueField("double_field", "use_field_mapping") .addDocValueField("date_field", "use_field_mapping") .addDocValueField("boolean_field", "use_field_mapping") + .addDocValueField("binary_field", "use_field_mapping") .addDocValueField("ip_field", "use_field_mapping"); searchResponse = builder.execute().actionGet(); @@ -823,7 +830,7 @@ public void testFieldsPulledFromFieldData() throws Exception { fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", - "ip_field"))); + "binary_field", "ip_field"))); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); @@ -836,6 +843,7 @@ public void testFieldsPulledFromFieldData() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); 
assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 70591e06d1084..c25cad61e0740 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -100,7 +100,7 @@ public void waitForBlock(String node, String repository, TimeValue timeout) thro } Thread.sleep(100); } - fail("Timeout!!!"); + fail("Timeout waiting for node [" + node + "] to be blocked"); } public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 65926234d45c0..70be72989cf95 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -224,52 +224,16 @@ public void testAtomicWriteFailures() throws Exception { IOException writeBlobException = expectThrows(IOException.class, () -> { BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - throw new IOException("Exception thrown in writeBlob() for " + blobName); + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize) throws IOException { + throw new IOException("Exception thrown in writeBlobAtomic() for " + blobName); } }; checksumFormat.writeAtomic(blobObj, wrapper, name); }); - assertEquals("Exception thrown in writeBlob() for pending-" + name, writeBlobException.getMessage()); + assertEquals("Exception thrown in writeBlobAtomic() for " + name, writeBlobException.getMessage()); assertEquals(0, writeBlobException.getSuppressed().length); } - { - IOException moveException = expectThrows(IOException.class, () -> { - BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - throw new IOException("Exception thrown in move() for " + sourceBlobName); - } - }; - checksumFormat.writeAtomic(blobObj, wrapper, name); - }); - assertEquals("Exception thrown in move() for pending-" + name, moveException.getMessage()); - assertEquals(0, moveException.getSuppressed().length); - } - { - IOException moveThenDeleteException = expectThrows(IOException.class, () -> { - BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - throw new IOException("Exception thrown in move() for " + sourceBlobName); - } - - @Override - public void deleteBlob(String blobName) throws IOException { - throw new IOException("Exception thrown in deleteBlob() for " + blobName); - } - }; - checksumFormat.writeAtomic(blobObj, wrapper, name); - }); - - assertEquals("Exception thrown in move() for 
pending-" + name, moveThenDeleteException.getMessage()); - assertEquals(1, moveThenDeleteException.getSuppressed().length); - - final Throwable suppressedThrowable = moveThenDeleteException.getSuppressed()[0]; - assertTrue(suppressedThrowable instanceof IOException); - assertEquals("Exception thrown in deleteBlob() for pending-" + name, suppressedThrowable.getMessage()); - } } protected BlobStore createTestBlobStore() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 56a4a279cab62..089955d140f44 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -53,11 +53,21 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t delegate.writeBlob(blobName, inputStream, blobSize); } + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + delegate.writeBlobAtomic(blobName, inputStream, blobSize); + } + @Override public void deleteBlob(String blobName) throws IOException { delegate.deleteBlob(blobName); } + @Override + public void deleteBlobIgnoringIfNotExists(final String blobName) throws IOException { + delegate.deleteBlobIgnoringIfNotExists(blobName); + } + @Override public Map listBlobs() throws IOException { return delegate.listBlobs(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 3a5b068cd8977..5fa884adbfe62 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -19,20 +19,6 @@ package org.elasticsearch.snapshots.mockstore; -import java.io.IOException; -import java.io.InputStream; -import java.io.UnsupportedEncodingException; -import java.nio.file.Path; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; - import com.carrotsearch.randomizedtesting.RandomizedContext; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; @@ -42,6 +28,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -49,11 +36,26 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.SnapshotId; +import java.io.IOException; +import java.io.InputStream; +import 
java.io.UnsupportedEncodingException; +import java.nio.file.Path; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicLong; + public class MockRepository extends FsRepository { public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { @@ -325,6 +327,12 @@ public void deleteBlob(String blobName) throws IOException { super.deleteBlob(blobName); } + @Override + public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException { + maybeIOExceptionOrBlock(blobName); + super.deleteBlobIgnoringIfNotExists(blobName); + } + @Override public Map listBlobs() throws IOException { maybeIOExceptionOrBlock(""); @@ -365,6 +373,31 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize) t maybeIOExceptionOrBlock(blobName); } } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + final Random random = RandomizedContext.current().getRandom(); + if (random.nextBoolean()) { + if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) { + // Simulate a failure between the write and move operation in FsBlobContainer + final String tempBlobName = FsBlobContainer.tempBlobName(blobName); + super.writeBlob(tempBlobName, inputStream, blobSize); + maybeIOExceptionOrBlock(blobName); + final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate(); + fsBlobContainer.move(tempBlobName, blobName); + } else { + // Atomic write since it is potentially supported + // by the delegating blob container + maybeIOExceptionOrBlock(blobName); + super.writeBlobAtomic(blobName, inputStream, blobSize); + } + } else { + // Simulate a non-atomic write since many blob container + // implementations do not support atomic writes + maybeIOExceptionOrBlock(blobName); + super.writeBlob(blobName, inputStream, blobSize); + } + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index 743be6d1bcb01..df2024de445c1 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -158,7 +158,11 @@ public void testVerifyOverwriteFails() throws IOException { protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException { try (InputStream stream = bytesArray.streamInput()) { - container.writeBlob(blobName, stream, bytesArray.length()); + if (randomBoolean()) { + container.writeBlob(blobName, stream, bytesArray.length()); + } else { + container.writeBlobAtomic(blobName, stream, bytesArray.length()); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index b0eef695b7ff1..d39a0ad2aedaf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -26,8 +26,6 @@ import 
com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -77,10 +75,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineTestCase; -import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; @@ -1105,7 +1100,7 @@ public void beforeIndexDeletion() throws Exception { // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures. assertNoPendingIndexOperations(); //check that shards that have same sync id also contain same number of documents - assertSameSyncIdSameDocs(); + assertSameSyncIdSameDocs(); assertOpenTranslogReferences(); } @@ -1116,16 +1111,16 @@ private void assertSameSyncIdSameDocs() { IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); for (IndexService indexService : indexServices) { for (IndexShard indexShard : indexService) { - Tuple commitStats = commitStats(indexShard); - if (commitStats != null) { - String syncId = commitStats.v1(); - long liveDocsOnShard = commitStats.v2(); - if (docsOnShards.get(syncId) != null) { - assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name + - ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard, docsOnShards.get(syncId), - equalTo(liveDocsOnShard)); - } else { - docsOnShards.put(syncId, liveDocsOnShard); + CommitStats commitStats = indexShard.commitStats(); + if (commitStats != null) { // null if the engine is closed or if the shard is recovering + String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); + if (syncId != null) { + long liveDocsOnShard = commitStats.getNumDocs(); + if (docsOnShards.get(syncId) != null) { + assertThat("sync id is equal but number of docs does not match on node " + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard)); + } else { + docsOnShards.put(syncId, liveDocsOnShard); + } } } } @@ -1133,22 +1128,6 @@ private void assertSameSyncIdSameDocs() { } } - private Tuple commitStats(IndexShard indexShard) { - try (Engine.IndexCommitRef commitRef = indexShard.acquireLastIndexCommit(false)) { - final String syncId = commitRef.getIndexCommit().getUserData().get(Engine.SYNC_COMMIT_ID); - // Only read if sync_id exists - if (Strings.hasText(syncId)) { - return Tuple.tuple(syncId, Lucene.getExactNumDocs(commitRef.getIndexCommit())); - } else { - return null; - } - } catch (IllegalIndexShardStateException ex) { - return null; // Shard is closed or not started yet. 
- } catch (IOException ex) { - throw new AssertionError(ex); - } - } - private void assertNoPendingIndexOperations() throws Exception { assertBusy(() -> { final Collection nodesAndClients = nodes.values(); diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 314ffced4a0f7..17e0f2b70fd2a 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -81,7 +81,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', - 'en/security/authorization/managing-roles.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] diff --git a/x-pack/docs/en/rest-api/index.asciidoc b/x-pack/docs/en/rest-api/index.asciidoc deleted file mode 100644 index 85c72a78d99fd..0000000000000 --- a/x-pack/docs/en/rest-api/index.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -[role="xpack"] -[[xpack-api]] -= {xpack} APIs - -[partintro] --- -{xpack} exposes REST APIs that are used by the UI components and can be called -directly to configure and access {xpack} features. - -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> --- - - -include::info.asciidoc[] -include::graph/explore.asciidoc[] -include::licensing.asciidoc[] -include::migration.asciidoc[] -include::ml-api.asciidoc[] -include::rollup-api.asciidoc[] -include::security.asciidoc[] -include::watcher.asciidoc[] -include::defs.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index 83edef1a67ba4..6ee5d9d39bbf2 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -12,6 +12,8 @@ A role is defined by the following JSON structure: "indices": [ ... ] <3> } ----- +// NOTCONSOLE + <1> A list of usernames the owners of this role can <>. <2> A list of cluster privileges. These privileges define the cluster level actions users with this role are able to execute. This field @@ -37,6 +39,8 @@ The following describes the structure of an indices permissions entry: "query": "..." <4> } ------- +// NOTCONSOLE + <1> A list of indices (or index name patterns) to which the permissions in this entry apply. <2> The index level privileges the owners of the role have on the associated @@ -77,8 +81,9 @@ The following snippet shows an example definition of a `clicks_admin` role: [source,js] ----------- +POST /_xpack/security/role/clicks_admin { - "run_as": [ "clicks_watcher_1" ] + "run_as": [ "clicks_watcher_1" ], "cluster": [ "monitor" ], "indices": [ { @@ -92,6 +97,7 @@ The following snippet shows an example definition of a `clicks_admin` role: ] } ----------- +// CONSOLE Based on the above definition, users owning the `clicks_admin` role can: diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 99e6a10ad92de..40c694cedb764 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -411,7 +411,8 @@ public void clusterChanged(ClusterChangedEvent event) { // auto-generate license if no licenses ever existed or if the current license is basic and // needs extended or if the license signature needs to be updated. 
this will trigger a subsequent cluster changed event if (currentClusterState.getNodes().isLocalNodeElectedMaster() && - (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || LicenseUtils.signatureNeedsUpdate(currentLicense))) { + (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || + LicenseUtils.signatureNeedsUpdate(currentLicense, currentClusterState.nodes()))) { registerOrUpdateSelfGeneratedLicense(); } } else if (logger.isDebugEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index 8fcdc05bcf986..4c8a558682b13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -6,8 +6,12 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.rest.RestStatus; +import java.util.stream.StreamSupport; + public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; @@ -42,8 +46,25 @@ public static boolean licenseNeedsExtended(License license) { * Checks if the signature of a self generated license with older version needs to be * recreated with the new key */ - public static boolean signatureNeedsUpdate(License license) { + public static boolean signatureNeedsUpdate(License license, DiscoveryNodes currentNodes) { + assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; + return ("basic".equals(license.type()) || "trial".equals(license.type())) && - (license.version() < License.VERSION_CRYPTO_ALGORITHMS); + // only upgrade signature when all nodes are ready to deserialize the new signature + (license.version() < License.VERSION_CRYPTO_ALGORITHMS && + compatibleLicenseVersion(currentNodes) == License.VERSION_CRYPTO_ALGORITHMS + ); + } + + public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) { + assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; + + if (StreamSupport.stream(currentNodes.spliterator(), false) + .allMatch(node -> node.getVersion().onOrAfter(Version.V_6_4_0))) { + // License.VERSION_CRYPTO_ALGORITHMS was introduced in 6.4.0 + return License.VERSION_CRYPTO_ALGORITHMS; + } else { + return License.VERSION_START_DATE; + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java index 0bc49d517cd92..fb9b167d3db52 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.license; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -26,8 +27,8 @@ class SelfGeneratedLicense { - public static License create(License.Builder specBuilder) { - return create(specBuilder, License.VERSION_CURRENT); + public static License create(License.Builder specBuilder, DiscoveryNodes currentNodes) { + return 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java
index 0bc49d517cd92..fb9b167d3db52 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.license;
 
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -26,8 +27,8 @@
 class SelfGeneratedLicense {
 
-    public static License create(License.Builder specBuilder) {
-        return create(specBuilder, License.VERSION_CURRENT);
+    public static License create(License.Builder specBuilder, DiscoveryNodes currentNodes) {
+        return create(specBuilder, LicenseUtils.compatibleLicenseVersion(currentNodes));
    }
 
     public static License create(License.Builder specBuilder, int version) {
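With this split, the two create(...) overloads divide cleanly between production and test
call sites, as the hunks below show. A sketch only (specBuilder stands for any of the spec
builders used at those call sites):

    // cluster-state tasks derive the license version from the nodes actually present:
    License prod = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
    // tests that do not exercise mixed-version clusters pin the latest version explicitly:
    License test = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);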
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java
index 0cf949a69906f..468f1799a07b9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java
@@ -73,7 +73,7 @@ public ClusterState execute(ClusterState currentState) throws Exception {
                     .issueDate(issueDate)
                     .type("basic")
                     .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-            License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+            License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
             if (request.isAcknowledged() == false && currentLicense != null) {
                 Map<String, String[]> ackMessages = LicenseService.getAckMessages(selfGeneratedLicense, currentLicense);
                 if (ackMessages.isEmpty() == false) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
index 5c5c03151ba26..2bf0555fde111 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java
@@ -82,7 +82,7 @@ public ClusterState execute(ClusterState currentState) throws Exception {
                     .issueDate(issueDate)
                     .type(request.getType())
                     .expiryDate(expiryDate);
-            License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+            License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
             LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT);
             mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData);
             return ClusterState.builder(currentState).metaData(mdBuilder).build();
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
index 13d6326f3ce1d..c2d53bd071638 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java
@@ -61,7 +61,7 @@ public ClusterState execute(ClusterState currentState) throws Exception {
                         "]. Must be trial or basic.");
             }
             return updateWithLicense(currentState, type);
-        } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense())) {
+        } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense(), currentState.nodes())) {
             return updateLicenseSignature(currentState, currentLicensesMetaData);
         } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) {
             return extendBasic(currentState, currentLicensesMetaData);
@@ -87,7 +87,7 @@ private ClusterState updateLicenseSignature(ClusterState currentState, LicensesMetaData currentLicenseMetaData) {
                 .issueDate(issueDate)
                 .type(type)
                 .expiryDate(expiryDate);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         Version trialVersion = currentLicenseMetaData.getMostRecentTrialVersion();
         LicensesMetaData newLicenseMetadata = new LicensesMetaData(selfGeneratedLicense, trialVersion);
         mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata);
@@ -120,7 +120,7 @@ private LicensesMetaData createBasicLicenseFromExistingLicense(LicensesMetaData currentLicenseMetadata) {
                 .issueDate(currentLicense.issueDate())
                 .type("basic")
                 .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentLicense.version());
         Version trialVersion = currentLicenseMetadata.getMostRecentTrialVersion();
         return new LicensesMetaData(selfGeneratedLicense, trialVersion);
     }
@@ -141,7 +141,7 @@ private ClusterState updateWithLicense(ClusterState currentState, String type) {
                 .issueDate(issueDate)
                 .type(type)
                 .expiryDate(expiryDate);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         LicensesMetaData licensesMetaData;
         if ("trial".equals(type)) {
             licensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
index a799cbe944715..3818ebcf44758 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -173,7 +174,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         builder.endObject();
         return builder;
     }
-    
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(id);
@@ -336,6 +337,17 @@ public RollupJobConfig build() {
             if (indexPattern == null || indexPattern.isEmpty()) {
                 throw new IllegalArgumentException("An index pattern is mandatory.");
             }
+            if (Regex.isMatchAllPattern(indexPattern)) {
+                throw new IllegalArgumentException("Index pattern must not match all indices (as it would match its own rollup index).");
+            }
+            if (Regex.isSimpleMatchPattern(indexPattern)) {
+                if (Regex.simpleMatch(indexPattern, rollupIndex)) {
+                    throw new IllegalArgumentException("Index pattern would match rollup index name which is not allowed.");
+                }
+            }
+            if (indexPattern.equals(rollupIndex)) {
+                throw new IllegalArgumentException("Rollup index may not be the same as the index pattern.");
+            }
             if (rollupIndex == null || rollupIndex.isEmpty()) {
                 throw new IllegalArgumentException("A rollup index name is mandatory.");
             }
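The three new guards above close a feedback loop in which a rollup job could sweep its own
rollup index back into its input. A sketch of a configuration that build() now rejects
(the job id and index names are illustrative, written in the style of the tests added
further below):

    RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("job1");
    job.setIndexPattern("foo-*");     // matches every index starting with "foo-" ...
    job.setRollupIndex("foo-rollup"); // ... which would include the job's own rollup index
    expectThrows(IllegalArgumentException.class, job::build);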
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java
index 2a237f090e2fd..5405af013af51 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java
@@ -104,7 +104,7 @@ public void testExpiredSelfGeneratedBasicLicenseIsExtended() throws Exception {
                 .issueDate(dateMath("now-10h", now))
                 .type("basic")
                 .expiryDate(dateMath("now-2h", now));
-        License license = SelfGeneratedLicense.create(builder);
+        License license = SelfGeneratedLicense.create(builder, License.VERSION_CURRENT);
         XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY);
         setInitialState(license, licenseState, Settings.EMPTY);
@@ -125,4 +125,4 @@ public void testExpiredSelfGeneratedBasicLicenseIsExtended() throws Exception {
         assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate());
         assertEquals(uid, licenseMetaData.getLicense().uid());
     }
-}
\ No newline at end of file
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java
index d7cf5ab50fb48..d07be0fd3c79b 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java
@@ -111,7 +111,7 @@ public void testLicenseRestViewNonExpiringBasic() throws Exception {
                 .issueDate(now)
                 .type("basic")
                 .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-        License license = SelfGeneratedLicense.create(specBuilder);
+        License license = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
         license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true")));
         builder.flush();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
index f3ed04ed22dfe..d7799959f6cce 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java
@@ -95,7 +95,7 @@ public void testXContentSerializationOneTrial() throws Exception {
                 .issueDate(issueDate)
                 .type(randomBoolean() ? "trial" : "basic")
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        final License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        final License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         LicensesMetaData licensesMetaData = new LicensesMetaData(trialLicense, Version.CURRENT);
         XContentBuilder builder = XContentFactory.jsonBuilder();
         builder.startObject();
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java
index aa27dbdcb4964..4e061623ccd94 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java
@@ -34,7 +34,7 @@ public void testBasic() throws Exception {
                 .type(randomBoolean() ? "trial" : "basic")
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         assertThat(SelfGeneratedLicense.verify(trialLicense), equalTo(true));
     }
 
@@ -47,7 +47,7 @@ public void testTampered() throws Exception {
                 .maxNodes(5)
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         final String originalSignature = trialLicense.signature();
         License tamperedLicense = License.builder().fromLicenseSpec(trialLicense, originalSignature)
                 .expiryDate(System.currentTimeMillis() + TimeValue.timeValueHours(5).getMillis())
@@ -70,7 +70,8 @@ public void testFrom1x() throws Exception {
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
         License pre20TrialLicense = specBuilder.build();
-        License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial"));
+        License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial"),
+                License.VERSION_CURRENT);
         assertThat(SelfGeneratedLicense.verify(license), equalTo(true));
     }
 
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java
index 7522f474e77b2..3d82ac118f503 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java
@@ -27,8 +27,9 @@ public static RollupJobConfig.Builder getRollupJob(String jobId) {
         builder.setId(jobId);
         builder.setCron(getCronString());
         builder.setTimeout(new TimeValue(ESTestCase.randomIntBetween(1,100)));
-        builder.setIndexPattern(ESTestCase.randomAlphaOfLengthBetween(1,10));
-        builder.setRollupIndex(ESTestCase.randomAlphaOfLengthBetween(1,10));
+        String indexPattern = ESTestCase.randomAlphaOfLengthBetween(1,10);
+        builder.setIndexPattern(indexPattern);
+        builder.setRollupIndex("rollup_" + indexPattern); // to ensure the index pattern != rollup index
         builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build());
         builder.setPageSize(ESTestCase.randomIntBetween(1,10));
         if (ESTestCase.randomBoolean()) {
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
index 24dcb323e3dc6..819a8dfa3fe9f 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java
@@ -95,8 +95,8 @@ protected void masterOperation(PutRollupJobAction.Request request, ClusterState clusterState,
         XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
 
         FieldCapabilitiesRequest fieldCapsRequest = new FieldCapabilitiesRequest()
-            .indices(request.getConfig().getIndexPattern())
-            .fields(request.getConfig().getAllFields().toArray(new String[0]));
+                .indices(request.getConfig().getIndexPattern())
+                .fields(request.getConfig().getAllFields().toArray(new String[0]));
 
         client.fieldCaps(fieldCapsRequest, new ActionListener<FieldCapabilitiesResponse>() {
             @Override
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
index 9dcc2e482d079..850efb95da309 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java
@@ -39,6 +39,7 @@
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.RangeQueryBuilder;
 import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -66,6 +67,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Objects;
@@ -271,91 +273,38 @@ static QueryBuilder rewriteQuery(QueryBuilder builder, Set<RollupJobCaps> jobCaps) {
                     rewriteQuery(((BoostingQueryBuilder)builder).positiveQuery(), jobCaps));
         } else if (builder.getWriteableName().equals(DisMaxQueryBuilder.NAME)) {
             DisMaxQueryBuilder rewritten = new DisMaxQueryBuilder();
-            ((DisMaxQueryBuilder)builder).innerQueries().forEach(query -> rewritten.add(rewriteQuery(query, jobCaps)));
+            ((DisMaxQueryBuilder) builder).innerQueries().forEach(query -> rewritten.add(rewriteQuery(query, jobCaps)));
             return rewritten;
-        } else if (builder.getWriteableName().equals(RangeQueryBuilder.NAME) || builder.getWriteableName().equals(TermQueryBuilder.NAME)) {
-
-            String fieldName = builder.getWriteableName().equals(RangeQueryBuilder.NAME)
-                    ? ((RangeQueryBuilder)builder).fieldName()
-                    : ((TermQueryBuilder)builder).fieldName();
-
-            List<String> incorrectTimeZones = new ArrayList<>();
-            List<String> rewrittenFieldName = jobCaps.stream()
-                    // We only care about job caps that have the query's target field
-                    .filter(caps -> caps.getFieldCaps().keySet().contains(fieldName))
-                    .map(caps -> {
-                        RollupJobCaps.RollupFieldCaps fieldCaps = caps.getFieldCaps().get(fieldName);
-                        return fieldCaps.getAggs().stream()
-                                // For now, we only allow filtering on grouping fields
-                                .filter(agg -> {
-                                    String type = (String)agg.get(RollupField.AGG);
-
-                                    // If the cap is for a date_histo, and the query is a range, the timezones need to match
-                                    if (type.equals(DateHistogramAggregationBuilder.NAME) && builder instanceof RangeQueryBuilder) {
-                                        String timeZone = ((RangeQueryBuilder)builder).timeZone();
-
-                                        // Many range queries don't include the timezone because the default is UTC, but the query
-                                        // builder will return null so we need to set it here
-                                        if (timeZone == null) {
-                                            timeZone = DateTimeZone.UTC.toString();
-                                        }
-                                        boolean matchingTZ = ((String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName()))
-                                                .equalsIgnoreCase(timeZone);
-                                        if (matchingTZ == false) {
-                                            incorrectTimeZones.add((String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName()));
-                                        }
-                                        return matchingTZ;
-                                    }
-                                    // Otherwise just make sure it's one of the three groups
-                                    return type.equals(TermsAggregationBuilder.NAME)
-                                            || type.equals(DateHistogramAggregationBuilder.NAME)
-                                            || type.equals(HistogramAggregationBuilder.NAME);
-                                })
-                                // Rewrite the field name to our convention (e.g. "foo" -> "date_histogram.foo.timestamp")
-                                .map(agg -> {
-                                    if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
-                                        return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.TIMESTAMP);
-                                    } else {
-                                        return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.VALUE);
-                                    }
-                                })
-                                .collect(Collectors.toList());
-                    })
-                    .distinct()
-                    .collect(ArrayList::new, List::addAll, List::addAll);
-
-            if (rewrittenFieldName.isEmpty()) {
-                if (incorrectTimeZones.isEmpty()) {
-                    throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builder.getWriteableName()
-                            + "] query is not available in selected rollup indices, cannot query.");
-                } else {
-                    throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builder.getWriteableName()
-                            + "] query was found in rollup indices, but requested timezone is not compatible. Options include: "
-                            + incorrectTimeZones);
-                }
-            }
-
-            if (rewrittenFieldName.size() > 1) {
-                throw new IllegalArgumentException("Ambiguous field name resolution when mapping to rolled fields. Field name [" +
-                        fieldName + "] was mapped to: [" + Strings.collectionToDelimitedString(rewrittenFieldName, ",") + "].");
-            }
-
-            //Note: instanceof here to make casting checks happier
-            if (builder instanceof RangeQueryBuilder) {
-                RangeQueryBuilder rewritten = new RangeQueryBuilder(rewrittenFieldName.get(0));
-                RangeQueryBuilder original = (RangeQueryBuilder)builder;
-                rewritten.from(original.from());
-                rewritten.to(original.to());
-                if (original.timeZone() != null) {
-                    rewritten.timeZone(original.timeZone());
-                }
-                rewritten.includeLower(original.includeLower());
-                rewritten.includeUpper(original.includeUpper());
-                return rewritten;
-            } else {
-                return new TermQueryBuilder(rewrittenFieldName.get(0), ((TermQueryBuilder)builder).value());
-            }
-
+        } else if (builder.getWriteableName().equals(RangeQueryBuilder.NAME)) {
+            RangeQueryBuilder range = (RangeQueryBuilder) builder;
+            String fieldName = range.fieldName();
+            // Many range queries don't include the timezone because the default is UTC, but the query
+            // builder will return null so we need to set it here
+            String timeZone = range.timeZone() == null ? DateTimeZone.UTC.toString() : range.timeZone();
+
+            String rewrittenFieldName = rewriteFieldName(jobCaps, RangeQueryBuilder.NAME, fieldName, timeZone);
+            RangeQueryBuilder rewritten = new RangeQueryBuilder(rewrittenFieldName)
+                    .from(range.from())
+                    .to(range.to())
+                    .includeLower(range.includeLower())
+                    .includeUpper(range.includeUpper());
+            if (range.timeZone() != null) {
+                rewritten.timeZone(range.timeZone());
+            }
+            if (range.format() != null) {
+                rewritten.format(range.format());
+            }
+            return rewritten;
+        } else if (builder.getWriteableName().equals(TermQueryBuilder.NAME)) {
+            TermQueryBuilder term = (TermQueryBuilder) builder;
+            String fieldName = term.fieldName();
+            String rewrittenFieldName = rewriteFieldName(jobCaps, TermQueryBuilder.NAME, fieldName, null);
+            return new TermQueryBuilder(rewrittenFieldName, term.value());
+        } else if (builder.getWriteableName().equals(TermsQueryBuilder.NAME)) {
+            TermsQueryBuilder terms = (TermsQueryBuilder) builder;
+            String fieldName = terms.fieldName();
+            String rewrittenFieldName = rewriteFieldName(jobCaps, TermsQueryBuilder.NAME, fieldName, null);
+            return new TermsQueryBuilder(rewrittenFieldName, terms.values());
         } else if (builder.getWriteableName().equals(MatchAllQueryBuilder.NAME)) {
             // no-op
             return builder;
@@ -364,6 +313,64 @@ static QueryBuilder rewriteQuery(QueryBuilder builder, Set<RollupJobCaps> jobCaps) {
         }
     }
 
+    private static String rewriteFieldName(Set<RollupJobCaps> jobCaps,
+                                           String builderName,
+                                           String fieldName,
+                                           String timeZone) {
+        List<String> incompatibleTimeZones = timeZone == null ? Collections.emptyList() : new ArrayList<>();
+        List<String> rewrittenFieldNames = jobCaps.stream()
+                // We only care about job caps that have the query's target field
+                .filter(caps -> caps.getFieldCaps().keySet().contains(fieldName))
+                .map(caps -> {
+                    RollupJobCaps.RollupFieldCaps fieldCaps = caps.getFieldCaps().get(fieldName);
+                    return fieldCaps.getAggs().stream()
+                            // For now, we only allow filtering on grouping fields
+                            .filter(agg -> {
+                                String type = (String)agg.get(RollupField.AGG);
+
+                                // If the cap is for a date_histo, and the query is a range, the timezones need to match
+                                if (type.equals(DateHistogramAggregationBuilder.NAME) && timeZone != null) {
+                                    boolean matchingTZ = ((String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName()))
+                                            .equalsIgnoreCase(timeZone);
+                                    if (matchingTZ == false) {
+                                        incompatibleTimeZones.add((String)agg.get(DateHistoGroupConfig.TIME_ZONE.getPreferredName()));
+                                    }
+                                    return matchingTZ;
+                                }
+                                // Otherwise just make sure it's one of the three groups
+                                return type.equals(TermsAggregationBuilder.NAME)
+                                        || type.equals(DateHistogramAggregationBuilder.NAME)
+                                        || type.equals(HistogramAggregationBuilder.NAME);
+                            })
+                            // Rewrite the field name to our convention (e.g. "foo" -> "date_histogram.foo.timestamp")
+                            .map(agg -> {
+                                if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) {
+                                    return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.TIMESTAMP);
+                                } else {
+                                    return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.VALUE);
+                                }
+                            })
+                            .collect(Collectors.toList());
+                })
+                .distinct()
+                .collect(ArrayList::new, List::addAll, List::addAll);
+        if (rewrittenFieldNames.isEmpty()) {
+            if (incompatibleTimeZones.isEmpty()) {
+                throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName
+                        + "] query is not available in selected rollup indices, cannot query.");
+            } else {
+                throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName
+                        + "] query was found in rollup indices, but requested timezone is not compatible. Options include: "
+                        + incompatibleTimeZones);
+            }
+        } else if (rewrittenFieldNames.size() > 1) {
+            throw new IllegalArgumentException("Ambiguous field name resolution when mapping to rolled fields. Field name [" +
+                    fieldName + "] was mapped to: [" + Strings.collectionToDelimitedString(rewrittenFieldNames, ",") + "].");
+        } else {
+            return rewrittenFieldNames.get(0);
+        }
+    }
+
     static RollupSearchContext separateIndices(String[] indices,
                                                ImmutableOpenMap<String, IndexMetaData> indexMetaData) {
         if (indices.length == 0) {
"foo" -> "date_histogram.foo.timestamp") + .map(agg -> { + if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { + return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.TIMESTAMP); + } else { + return RollupField.formatFieldName(fieldName, (String)agg.get(RollupField.AGG), RollupField.VALUE); + } + }) + .collect(Collectors.toList()); + }) + .distinct() + .collect(ArrayList::new, List::addAll, List::addAll); + if (rewrittenFieldNames.isEmpty()) { + if (incompatibleTimeZones.isEmpty()) { + throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName + + "] query is not available in selected rollup indices, cannot query."); + } else { + throw new IllegalArgumentException("Field [" + fieldName + "] in [" + builderName + + "] query was found in rollup indices, but requested timezone is not compatible. Options include: " + + incompatibleTimeZones); + } + } else if (rewrittenFieldNames.size() > 1) { + throw new IllegalArgumentException("Ambiguous field name resolution when mapping to rolled fields. Field name [" + + fieldName + "] was mapped to: [" + Strings.collectionToDelimitedString(rewrittenFieldNames, ",") + "]."); + } else { + return rewrittenFieldNames.get(0); + } + } + static RollupSearchContext separateIndices(String[] indices, ImmutableOpenMap indexMetaData) { if (indices.length == 0) { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index d9d3e672a0afc..ed21585c7dc0d 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -25,9 +25,11 @@ import org.elasticsearch.index.query.DisMaxQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.script.ScriptService; @@ -61,6 +63,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -153,7 +156,7 @@ public void testRangeWrongTZ() { "compatible. 
Options include: [UTC]")); } - public void testTerms() { + public void testTermQuery() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); group.setTerms(ConfigTestHelpers.getTerms().setFields(Collections.singletonList("foo")).build()); @@ -166,6 +169,23 @@ public void testTerms() { assertThat(((TermQueryBuilder)rewritten).fieldName(), equalTo("foo.terms.value")); } + public void testTermsQuery() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); + group.setTerms(ConfigTestHelpers.getTerms().setFields(Collections.singletonList("foo")).build()); + job.setGroupConfig(group.build()); + RollupJobCaps cap = new RollupJobCaps(job.build()); + Set caps = new HashSet<>(); + caps.add(cap); + QueryBuilder original = new TermsQueryBuilder("foo", Arrays.asList("bar", "baz")); + QueryBuilder rewritten = + TransportRollupSearchAction.rewriteQuery(original, caps); + assertThat(rewritten, instanceOf(TermsQueryBuilder.class)); + assertNotSame(rewritten, original); + assertThat(((TermsQueryBuilder)rewritten).fieldName(), equalTo("foo.terms.value")); + assertThat(((TermsQueryBuilder)rewritten).values(), equalTo(Arrays.asList("bar", "baz"))); + } + public void testCompounds() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig(); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index f22a6c87a3ff1..e465c7883cfdd 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -122,6 +122,37 @@ public void testEmptyIndexPattern() { assertThat(e.getMessage(), equalTo("An index pattern is mandatory.")); } + public void testMatchAllIndexPattern() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("*"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern must not match all indices (as it would match it's own rollup index")); + } + + public void testMatchOwnRollupPatternPrefix() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("foo-*"); + job.setRollupIndex("foo-rollup"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern would match rollup index name which is not allowed.")); + } + + public void testMatchOwnRollupPatternSuffix() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("*-rollup"); + job.setRollupIndex("foo-rollup"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern would match rollup index name which is not allowed.")); + } + + public void testIndexPatternIdenticalToRollup() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("foo"); + job.setRollupIndex("foo"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Rollup index may not be the same as the index pattern.")); + } + public void testEmptyRollupIndex() { 
RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); job.setRollupIndex(""); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 6d12b6472f18f..d2e359990966b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -7,7 +7,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -17,7 +16,6 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -112,7 +110,6 @@ import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -934,7 +931,8 @@ public BiConsumer getJoinValidator() { if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) - .andThen(new ValidateUpgradedSecurityIndex()); + .andThen(new ValidateUpgradedSecurityIndex()) + .andThen(new ValidateLicenseCanBeDeserialized()); } return null; } @@ -971,6 +969,17 @@ public void accept(DiscoveryNode node, ClusterState state) { } } + static final class ValidateLicenseCanBeDeserialized implements BiConsumer { + @Override + public void accept(DiscoveryNode node, ClusterState state) { + License license = LicenseService.getLicense(state.metaData()); + if (license != null && license.version() >= License.VERSION_CRYPTO_ALGORITHMS && node.getVersion().before(Version.V_6_4_0)) { + throw new IllegalStateException("node " + node + " is on version [" + node.getVersion() + + "] that cannot deserialize the license format [" + license.version() + "], upgrade node to at least 6.4.0"); + } + } + } + @Override public void reloadSPI(ClassLoader loader) { securityExtensions.addAll(SecurityExtension.loadExtensions(loader)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 190c8703955b1..b1d8d4b67bf7d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import 
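The validator above fails a join up front rather than letting an old node fail later when
it cannot read the cluster's license. A sketch of the guarded scenario (oldNode and
stateWithUpgradedLicense are assumed fixtures, in the style of the test below):

    DiscoveryNode oldNode = new DiscoveryNode("old", buildNewFakeTransportAddress(), Version.V_6_3_0);
    new Security.ValidateLicenseCanBeDeserialized().accept(oldNode, stateWithUpgradedLicense);
    // -> IllegalStateException: "node ... cannot deserialize the license format ...,
    //    upgrade node to at least 6.4.0"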
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
index 190c8703955b1..b1d8d4b67bf7d 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java
@@ -28,6 +28,7 @@
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.plugins.MapperPlugin;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.core.XPackSettings;
@@ -278,6 +279,19 @@ public void testTLSJoinValidator() throws Exception {
         }
     }
 
+    public void testJoinValidatorForLicenseDeserialization() throws Exception {
+        DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(),
+                VersionUtils.randomVersionBetween(random(), null, Version.V_6_3_0));
+        MetaData.Builder builder = MetaData.builder();
+        License license = TestUtils.generateSignedLicense(null,
+                randomIntBetween(License.VERSION_CRYPTO_ALGORITHMS, License.VERSION_CURRENT), -1, TimeValue.timeValueHours(24));
+        TestUtils.putLicense(builder, license);
+        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build();
+        IllegalStateException e = expectThrows(IllegalStateException.class,
+                () -> new Security.ValidateLicenseCanBeDeserialized().accept(node, state));
+        assertThat(e.getMessage(), containsString("cannot deserialize the license format"));
+    }
+
     public void testIndexJoinValidator_Old_And_Rolling() throws Exception {
         createComponents(Settings.EMPTY);
         BiConsumer<DiscoveryNode, ClusterState> joinValidator = security.getJoinValidator();
@@ -345,7 +359,7 @@ public void testIndexUpgradeValidatorWithMissingIndex() throws Exception {
             .nodes(discoveryNodes).build();
         joinValidator.accept(node, clusterState);
     }
-    
+
     public void testGetFieldFilterSecurityEnabled() throws Exception {
         createComponents(Settings.EMPTY);
         Function<String, Predicate<String>> fieldFilter = security.getFieldFilter();
diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1
new file mode 100644
index 0000000000000..36bf03bbbdb54
--- /dev/null
+++ b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-0a7c3f462f.jar.sha1
@@ -0,0 +1 @@
+003ed080e5184661e606091cd321c229798b22f8
\ No newline at end of file
diff --git a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1 b/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1
deleted file mode 100644
index cea13d14fe1c7..0000000000000
--- a/x-pack/plugin/sql/sql-proto/licenses/lucene-core-7.4.0-snapshot-1cbadda4d3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f345b6aa3c550dafc63de3e5a5c404691e782336
\ No newline at end of file
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
index 568a6261cda9b..717be0d6b250f 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml
@@ -188,4 +188,3 @@ setup:
           ]
         }
 
-