diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties index c0682da029a61..c902a69207108 100644 --- a/.ci/java-versions.properties +++ b/.ci/java-versions.properties @@ -8,4 +8,3 @@ ES_BUILD_JAVA=java11 ES_RUNTIME_JAVA=java8 GRADLE_TASK=build -GRADLE_EXTRA_ARGS=-Dtests.bwc.refspec=elastic/index-lifecycle-6.x diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b77d53be2dc86..2e5f6685ecf9f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -196,11 +196,26 @@ the settings window and/or restart IntelliJ to see your changes take effect. ### Creating A Distribution -To create a distribution from the source, simply run: +Run all build commands from within the root directory: ```sh cd elasticsearch/ -./gradlew assemble +``` + +To build a tar distribution, run this command: + +```sh +./gradlew -p distribution/archives/tar assemble --parallel +``` + +You will find the distribution under: +`./distribution/archives/tar/build/distributions/` + +To create all build artifacts (e.g., plugins and Javadocs) as well as +distributions in all formats, run this command: + +```sh +./gradlew assemble --parallel ``` The package distributions (Debian and RPM) can be found under: @@ -209,7 +224,6 @@ The package distributions (Debian and RPM) can be found under: The archive distributions (tar and zip) can be found under: `./distribution/archives/(tar|zip)/build/distributions/` - ### Running The Full Test Suite Before submitting your changes, run the test suite to make sure that nothing is broken, with: diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index f3735c269bab9..973f0ff429b8d 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -44,9 +44,13 @@ if (project == rootProject) { // we update the version property to reflect if we are building a snapshot or a release build // we write this back out below to load it in the Build.java which will be shown in rest main action // to indicate this being a snapshot build or a release build. 
-Properties props = VersionPropertiesLoader.loadBuildSrcVersion(project.file('version.properties')) +File propsFile = project.file('version.properties') +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(propsFile) version = props.getProperty("elasticsearch") processResources { + inputs.file(propsFile) + // We need to be explicit with the version because we add snapshot and qualifier to it based on properties + inputs.property("dynamic_elasticsearch_version", props.getProperty("elasticsearch")) doLast { Writer writer = file("$destinationDir/version.properties").newWriter() try { @@ -61,8 +65,8 @@ processResources { * Java version * *****************************************************************************/ -if (JavaVersion.current() < JavaVersion.VERSION_1_10) { - throw new GradleException('At least Java 10 is required to build elasticsearch gradle tools') +if (JavaVersion.current() < JavaVersion.VERSION_11) { + throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') } // Gradle 4.10 does not support setting this to 11 yet targetCompatibility = "10" @@ -241,7 +245,7 @@ class VersionPropertiesLoader { elasticsearch ) } - String qualifier = systemProperties.getProperty("build.version_qualifier", "alpha1"); + String qualifier = systemProperties.getProperty("build.version_qualifier", ""); if (qualifier.isEmpty() == false) { if (qualifier.matches("(alpha|beta|rc)\\d+") == false) { throw new IllegalStateException("Invalid qualifier: " + qualifier) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index a97989c1167c2..f5b6e6462bce2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -699,7 +699,7 @@ class BuildPlugin implements Plugin { // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes jarTask.manifest.attributes( - 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch.replace("-SNAPSHOT", ""), + 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch, 'X-Compile-Lucene-Version': VersionProperties.lucene, 'X-Compile-Elasticsearch-Snapshot': VersionProperties.isElasticsearchSnapshot(), 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 881fce443a792..28566662b4674 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.doc +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.test.RestTestPlugin import org.gradle.api.Project @@ -37,12 +38,12 @@ public class DocsTestPlugin extends RestTestPlugin { // Docs are published separately so no need to assemble project.tasks.assemble.enabled = false Map defaultSubstitutions = [ - /* These match up with the asciidoc syntax for substitutions but - * the values may differ. In particular {version} needs to resolve - * to the version being built for testing but needs to resolve to - * the last released version for docs. 
*/ - '\\{version\\}': - VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), + /* These match up with the asciidoc syntax for substitutions but + * the values may differ. In particular {version} needs to resolve + * to the version being built for testing but needs to resolve to + * the last released version for docs. */ + '\\{version\\}': Version.fromString(VersionProperties.elasticsearch).toString(), + '\\{version_qualified\\}': VersionProperties.elasticsearch, '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), '\\{build_flavor\\}' : project.integTestCluster.distribution.startsWith('oss-') ? 'oss' : 'default', diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 633647514ed7d..f0a07515924a0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.plugin +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.gradle.api.InvalidUserDataException import org.gradle.api.Task @@ -66,17 +67,11 @@ class PluginPropertiesTask extends Copy { } Map generateSubstitutions() { - def stringSnap = { version -> - if (version.endsWith("-SNAPSHOT")) { - return version.substring(0, version.length() - 9) - } - return version - } return [ 'name': extension.name, 'description': extension.description, - 'version': stringSnap(extension.version), - 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), + 'version': extension.version, + 'elasticsearchVersion': Version.fromString(VersionProperties.elasticsearch).toString(), 'javaVersion': project.targetCompatibility as String, 'classname': extension.classname, 'extendedPlugins': extension.extendedPlugins.join(','), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index e8415fa66fd43..fa3db32e6a0c8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -110,6 +110,14 @@ class ClusterConfiguration { return seedNode.transportUri() } + /** + * A closure to call which returns a manually supplied list of unicast seed hosts. + */ + @Input + Closure> otherUnicastHostAddresses = { + Collections.emptyList() + } + /** * A closure to call before the cluster is considered ready. The closure is passed the node info, * as well as a groovy AntBuilder, to enable running ant condition checks. 
The default wait diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 2c034f6e4f4b9..d2b4d6b3e4c9f 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -715,8 +715,9 @@ class ClusterFormationTasks { wait.doLast { Collection unicastHosts = new HashSet<>() - nodes.forEach { otherNode -> - String unicastHost = otherNode.config.unicastTransportUri(otherNode, null, project.ant) + nodes.forEach { node -> + unicastHosts.addAll(node.config.otherUnicastHostAddresses.call()) + String unicastHost = node.config.unicastTransportUri(node, null, project.ant) if (unicastHost != null) { unicastHosts.addAll(Arrays.asList(unicastHost.split(","))) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 0aa57502c39c1..0be2069b1cd93 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -278,9 +278,13 @@ class VagrantTestPlugin implements Plugin { } Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) { + String version = project.extensions.esvagrant.upgradeFromVersion + if (project.bwcVersions.unreleased.contains(project.extensions.esvagrant.upgradeFromVersion)) { + version += "-SNAPSHOT" + } dependsOn copyPackagingArchives file "${archivesDir}/upgrade_from_version" - contents project.extensions.esvagrant.upgradeFromVersion.toString() + contents version } Task createUpgradeIsOssFile = project.tasks.create('createUpgradeIsOssFile', FileContentsTask) { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index c3c484ae59b1f..c1d759a5d1c28 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-7d0a7782fa +lucene = 8.0.0-snapshot-31d7dfe6b1 # optional dependencies spatial4j = 0.7 @@ -16,6 +16,7 @@ slf4j = 1.6.2 jna = 4.5.1 netty = 4.1.30.Final +joda = 2.10.1 # test dependencies randomizedrunner = 2.7.0 diff --git a/client/benchmark/README.md b/client/benchmark/README.md index 68a910468e0cd..2562a139bec0d 100644 --- a/client/benchmark/README.md +++ b/client/benchmark/README.md @@ -1,7 +1,7 @@ ### Steps to execute the benchmark -1. Build `client-benchmark-noop-api-plugin` with `gradle :client:client-benchmark-noop-api-plugin:assemble` -2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip` +1. Build `client-benchmark-noop-api-plugin` with `./gradlew :client:client-benchmark-noop-api-plugin:assemble` +2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip`. 3. Start Elasticsearch on the target host (ideally *not* on the machine that runs the benchmarks) 4. 
Run the benchmark with @@ -49,7 +49,7 @@ The parameters are all in the `'`s and are in order: Example invocation: ``` -gradlew -p client/benchmark run --args ' rest search localhost geonames {"query":{"match_phrase":{"name":"Sankt Georgen"}}} 500,1000,1100,1200' +./gradlew -p client/benchmark run --args ' rest search localhost geonames {"query":{"match_phrase":{"name":"Sankt Georgen"}}} 500,1000,1100,1200' ``` The parameters are in order: diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java new file mode 100644 index 0000000000000..2857ec970908a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.ccr.PauseFollowRequest; +import org.elasticsearch.client.core.AcknowledgedResponse; + +import java.io.IOException; +import java.util.Collections; + +/** + * A wrapper for the {@link RestHighLevelClient} that provides methods for + * accessing the Elastic ccr related methods + *
+ * See the + * X-Pack CCR APIs on elastic.co for more information. + */ +public final class CcrClient { + + private final RestHighLevelClient restHighLevelClient; + + CcrClient(RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Instructs a follower index to pause the following of a leader index. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse pauseFollow(PauseFollowRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::pauseFollow, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously instructs a follower index to pause the following of a leader index. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void pauseFollowAsync(PauseFollowRequest request, + RequestOptions options, + ActionListener<AcknowledgedResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::pauseFollow, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java new file mode 100644 index 0000000000000..c33ed5e4bf05d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.client.ccr.PauseFollowRequest; + +final class CcrRequestConverters { + + static Request pauseFollow(PauseFollowRequest pauseFollowRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(pauseFollowRequest.getFollowerIndex()) + .addPathPartAsIs("_ccr", "pause_follow") + .build(); + return new Request(HttpPost.METHOD_NAME, endpoint); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java index 5e185866f8a89..0ca4f22edf282 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -78,10 +78,10 @@ static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecycl static Request removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest removePolicyRequest) { String[] indices = removePolicyRequest.indices() == null ? Strings.EMPTY_ARRAY : removePolicyRequest.indices().toArray(new String[] {}); - Request request = new Request(HttpDelete.METHOD_NAME, + Request request = new Request(HttpPost.METHOD_NAME, new RequestConverters.EndpointBuilder() .addCommaSeparatedPathParts(indices) - .addPathPartAsIs("_ilm") + .addPathPartAsIs("_ilm", "remove") .build()); RequestConverters.Params params = new RequestConverters.Params(request); params.withIndicesOptions(removePolicyRequest.indicesOptions()); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java new file mode 100644 index 0000000000000..24a04bd2da8d0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; + +/** + * Base class for responses that are node responses. These responses always contain the cluster + * name and the {@link NodesResponseHeader}. + */ +public abstract class NodesResponse { + + private final NodesResponseHeader header; + private final String clusterName; + + protected NodesResponse(NodesResponseHeader header, String clusterName) { + this.header = header; + this.clusterName = clusterName; + } + + /** + * Get the cluster name associated with all of the nodes. + * + * @return Never {@code null}. 
+ */ + public String getClusterName() { + return clusterName; + } + + /** + * Gets information about the number of total, successful and failed nodes the request was run on. + * Also includes exceptions if relevant. + */ + public NodesResponseHeader getHeader() { + return header; + } + + public static void declareCommonNodesResponseParsing(ConstructingObjectParser parser) { + parser.declareObject(ConstructingObjectParser.constructorArg(), NodesResponseHeader::fromXContent, new ParseField("_nodes")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 38dbbb8f1519b..d448275d35845 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -486,9 +486,18 @@ static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException { } static Request reindex(ReindexRequest reindexRequest) throws IOException { + return prepareReindexRequest(reindexRequest, true); + } + + static Request submitReindex(ReindexRequest reindexRequest) throws IOException { + return prepareReindexRequest(reindexRequest, false); + } + + private static Request prepareReindexRequest(ReindexRequest reindexRequest, boolean waitForCompletion) throws IOException { String endpoint = new EndpointBuilder().addPathPart("_reindex").build(); Request request = new Request(HttpPost.METHOD_NAME, endpoint); Params params = new Params(request) + .withWaitForCompletion(waitForCompletion) .withRefresh(reindexRequest.isRefresh()) .withTimeout(reindexRequest.getTimeout()) .withWaitForActiveShards(reindexRequest.getWaitForActiveShards()) @@ -897,11 +906,8 @@ Params withDetailed(boolean detailed) { return this; } - Params withWaitForCompletion(boolean waitForCompletion) { - if (waitForCompletion) { - return putParam("wait_for_completion", Boolean.TRUE.toString()); - } - return this; + Params withWaitForCompletion(Boolean waitForCompletion) { + return putParam("wait_for_completion", waitForCompletion.toString()); } Params withNodes(String[] nodes) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 11fff4c0a6b4d..8b740994e3b6c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -60,6 +60,7 @@ import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.tasks.TaskSubmissionResponse; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; @@ -228,6 +229,7 @@ public class RestHighLevelClient implements Closeable { private final SecurityClient securityClient = new SecurityClient(this); private final IndexLifecycleClient ilmClient = new IndexLifecycleClient(this); private final RollupClient rollupClient = new RollupClient(this); + private final CcrClient ccrClient = new CcrClient(this); /** * Creates a {@link RestHighLevelClient} given the low level {@link 
RestClientBuilder} that allows to build the @@ -321,6 +323,20 @@ public RollupClient rollup() { return rollupClient; } + /** + * Provides methods for accessing the Elastic Licensed CCR APIs that + * are shipped with the Elastic Stack distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
+ * See the + * CCR APIs on elastic.co for more information. + * + * @return the client wrapper for making CCR API calls + */ + public final CcrClient ccr() { + return ccrClient; + } + /** * Provides a {@link TasksClient} which can be used to access the Tasks API. * @@ -461,6 +477,20 @@ public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, Request ); } + /** + * Submits a reindex task. + * See Reindex API on elastic.co + * @param reindexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the submission response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final TaskSubmissionResponse submitReindexTask(ReindexRequest reindexRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + reindexRequest, RequestConverters::submitReindex, options, TaskSubmissionResponse::fromXContent, emptySet() + ); + } + /** * Asynchronously executes a reindex request. * See Reindex API on elastic.co diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index aee6eb5efccd5..d3b38aaf9e9d2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -23,6 +23,8 @@ import org.elasticsearch.client.security.AuthenticateRequest; import org.elasticsearch.client.security.AuthenticateResponse; import org.elasticsearch.client.security.ChangePasswordRequest; +import org.elasticsearch.client.security.ClearRealmCacheRequest; +import org.elasticsearch.client.security.ClearRealmCacheResponse; import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.ClearRolesCacheResponse; import org.elasticsearch.client.security.CreateTokenRequest; @@ -241,13 +243,43 @@ public void authenticateAsync(RequestOptions options, ActionListener + * the docs for more. + * + * @param request the request with the realm names and usernames to clear the cache for + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the clear realm cache call + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClearRealmCacheResponse clearRealmCache(ClearRealmCacheRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options, + ClearRealmCacheResponse::fromXContent, emptySet()); + } + + /** + * Clears the cache in one or more realms asynchronously. + * See + * the docs for more. + * + * @param request the request with the realm names and usernames to clear the cache for + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void clearRealmCacheAsync(ClearRealmCacheRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options, + ClearRealmCacheResponse::fromXContent, listener, emptySet()); + } + + /** + * Clears the roles cache for a set of roles. * See * the docs for more. * * @param request the request with the roles for which the cache should be cleared. * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response from the enable user call + * @return the response from the clear roles cache call * @throws IOException in case there is a problem sending the request or parsing back the response */ public ClearRolesCacheResponse clearRolesCache(ClearRolesCacheRequest request, RequestOptions options) throws IOException { @@ -256,7 +288,7 @@ public ClearRolesCacheResponse clearRolesCache(ClearRolesCacheRequest request, R } /** - * Clears the native roles cache for a set of roles asynchronously. + * Clears the roles cache for a set of roles asynchronously. * See * the docs for more. * diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index c8e3fe2b04dfb..5958a763eeebc 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.security.ClearRealmCacheRequest; import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.CreateTokenRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; @@ -62,7 +63,7 @@ static Request changePassword(ChangePasswordRequest changePasswordRequest) throw static Request putUser(PutUserRequest putUserRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_xpack/security/user") - .addPathPart(putUserRequest.getUsername()) + .addPathPart(putUserRequest.getUser().getUsername()) .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); request.setEntity(createEntity(putUserRequest, REQUEST_BODY_CONTENT_TYPE)); @@ -112,6 +113,23 @@ private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledReques return request; } + static Request clearRealmCache(ClearRealmCacheRequest clearRealmCacheRequest) { + RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack/security/realm"); + if (clearRealmCacheRequest.getRealms().isEmpty() == false) { + builder.addCommaSeparatedPathParts(clearRealmCacheRequest.getRealms().toArray(Strings.EMPTY_ARRAY)); + } else { + builder.addPathPart("_all"); + } + final String endpoint = builder.addPathPartAsIs("_clear_cache").build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + if (clearRealmCacheRequest.getUsernames().isEmpty() == false) { + RequestConverters.Params params = new 
RequestConverters.Params(request); + params.putParam("usernames", Strings.collectionToCommaDelimitedString(clearRealmCacheRequest.getUsernames())); + } + return request; + } + static Request clearRolesCache(ClearRolesCacheRequest disableCacheRequest) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_xpack/security/role") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseFollowRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseFollowRequest.java new file mode 100644 index 0000000000000..44ac443542caf --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseFollowRequest.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +public final class PauseFollowRequest implements Validatable { + + private final String followerIndex; + + public PauseFollowRequest(String followerIndex) { + this.followerIndex = Objects.requireNonNull(followerIndex); + } + + public String getFollowerIndex() { + return followerIndex; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java similarity index 83% rename from client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java index 4e279844afc59..f46ea88d473d0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/AcknowledgedResponse.java @@ -17,13 +17,14 @@ * under the License. 
*/ -package org.elasticsearch.client.rollup; +package org.elasticsearch.client.core; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -31,9 +32,12 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -public abstract class AcknowledgedResponse implements ToXContentObject { +public class AcknowledgedResponse implements ToXContentObject { protected static final String PARSE_FIELD_NAME = "acknowledged"; + private static final ConstructingObjectParser PARSER = AcknowledgedResponse + .generateParser("acknowledged_response", AcknowledgedResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); + private final boolean acknowledged; public AcknowledgedResponse(final boolean acknowledged) { @@ -50,6 +54,10 @@ protected static ConstructingObjectParser generateParser(String nam return p; } + public static AcknowledgedResponse fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java index 35734c4a8358a..a4f2cd45a2a26 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.rollup; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java index 31c656b033479..6a93f364c68e6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.client.rollup; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java index b953901ce0c84..be388ba8bc3b7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/StartRollupJobResponse.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.rollup; +import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java new file mode 100644 index 0000000000000..268fc4a1de6e0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Request for clearing the cache of one or more realms + */ +public final class ClearRealmCacheRequest implements Validatable { + + private final List<String> realms; + private final List<String> usernames; + + /** + * Create a new request to clear the cache of realms + * @param realms the realms to clear the cache of. Must not be {@code null}. An empty list + * indicates that all realms should have their caches cleared. + * @param usernames the usernames to clear the cache of. Must not be {@code null}. An empty + * list indicates that every user in the listed realms should have their cache + * cleared. + */ + public ClearRealmCacheRequest(List<String> realms, List<String> usernames) { + this.realms = Collections.unmodifiableList(Objects.requireNonNull(realms, "the realms list must not be null")); + this.usernames = Collections.unmodifiableList(Objects.requireNonNull(usernames, "usernames list must not be null")); + } + + public List<String> getRealms() { + return realms; + } + + public List<String> getUsernames() { + return usernames; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java new file mode 100644 index 0000000000000..ce1495f9ef2b8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.NodesResponseHeader; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; + +/** + * Response for a clear realm cache request. The response includes a header that contains the + * number of successful and failed nodes. + */ +public final class ClearRealmCacheResponse extends SecurityNodesResponse { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("clear_realm_cache_response_parser", + args -> new ClearRealmCacheResponse((List) args[0], (NodesResponseHeader) args[1], (String) args[2])); + + static { + SecurityNodesResponse.declareCommonNodesResponseParsing(PARSER); + } + + public ClearRealmCacheResponse(List nodes, NodesResponseHeader header, String clusterName) { + super(nodes, header, clusterName); + } + + public static ClearRealmCacheResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java index b6b864a37e226..c7df7e0f492e4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java @@ -20,18 +20,16 @@ package org.elasticsearch.client.security; import org.elasticsearch.client.NodesResponseHeader; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.List; -import java.util.Objects; /** - * The response object that will be returned when clearing the cache of native roles + * The response object that will be returned when clearing the roles cache */ -public final class ClearRolesCacheResponse { +public final class ClearRolesCacheResponse extends SecurityNodesResponse { @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = @@ -39,68 +37,11 @@ public final class ClearRolesCacheResponse { args -> new ClearRolesCacheResponse((List)args[0], (NodesResponseHeader) args[1], (String) args[2])); static { - PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Node.PARSER.apply(p, n), - new ParseField("nodes")); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), NodesResponseHeader::fromXContent, new ParseField("_nodes")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); + SecurityNodesResponse.declareCommonNodesResponseParsing(PARSER); } - private final List nodes; - private final NodesResponseHeader header; - private final String clusterName; - public ClearRolesCacheResponse(List nodes, NodesResponseHeader header, String clusterName) { - this.nodes = nodes; - this.header = header; - this.clusterName = Objects.requireNonNull(clusterName, "cluster name must be provided"); - } - - /** returns a list of nodes in which the cache was cleared */ - public List getNodes() { - return nodes; - } - - /** - * Get the 
cluster name associated with all of the nodes. - * - * @return Never {@code null}. - */ - public String getClusterName() { - return clusterName; - } - - /** - * Gets information about the number of total, successful and failed nodes the request was run on. - * Also includes exceptions if relevant. - */ - public NodesResponseHeader getHeader() { - return header; - } - - public static class Node { - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("clear_roles_cache_response_node", false, (args, id) -> new Node(id, (String) args[0])); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); - } - - private final String id; - private final String name; - - public Node(String id, String name) { - this.id = id; - this.name = name; - } - - public String getId() { - return id; - } - - public String getName() { - return name; - } + super(nodes, header, clusterName); } public static ClearRolesCacheResponse fromXContent(XContentParser parser) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java index 11e13f621e6a7..66af9fca31cb2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutUserRequest.java @@ -21,15 +21,14 @@ import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.CharArrays; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -38,62 +37,33 @@ */ public final class PutUserRequest implements Validatable, ToXContentObject { - private final String username; - private final List roles; - private final String fullName; - private final String email; - private final Map metadata; - private final char[] password; + private final User user; + private final @Nullable char[] password; private final boolean enabled; private final RefreshPolicy refreshPolicy; /** * Creates a new request that is used to create or update a user in the native realm. * - * @param username the username of the user to be created or updated + * @param user the user to be created or updated * @param password the password of the user. The password array is not modified by this class. * It is the responsibility of the caller to clear the password after receiving * a response. - * @param roles the roles that this user is assigned - * @param fullName the full name of the user that may be used for display purposes - * @param email the email address of the user * @param enabled true if the user is enabled and allowed to access elasticsearch - * @param metadata a map of additional user attributes that may be used in templating roles * @param refreshPolicy the refresh policy for the request. 
*/ - public PutUserRequest(String username, char[] password, List roles, String fullName, String email, boolean enabled, - Map metadata, RefreshPolicy refreshPolicy) { - this.username = Objects.requireNonNull(username, "username is required"); + public PutUserRequest(User user, @Nullable char[] password, boolean enabled, @Nullable RefreshPolicy refreshPolicy) { + this.user = Objects.requireNonNull(user, "user is required, cannot be null"); this.password = password; - this.roles = Collections.unmodifiableList(Objects.requireNonNull(roles, "roles must be specified")); - this.fullName = fullName; - this.email = email; this.enabled = enabled; - this.metadata = metadata == null ? Collections.emptyMap() : Collections.unmodifiableMap(metadata); this.refreshPolicy = refreshPolicy == null ? RefreshPolicy.getDefault() : refreshPolicy; } - public String getUsername() { - return username; + public User getUser() { + return user; } - public List getRoles() { - return roles; - } - - public String getFullName() { - return fullName; - } - - public String getEmail() { - return email; - } - - public Map getMetadata() { - return metadata; - } - - public char[] getPassword() { + public @Nullable char[] getPassword() { return password; } @@ -109,29 +79,25 @@ public RefreshPolicy getRefreshPolicy() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - PutUserRequest that = (PutUserRequest) o; - return enabled == that.enabled && - Objects.equals(username, that.username) && - Objects.equals(roles, that.roles) && - Objects.equals(fullName, that.fullName) && - Objects.equals(email, that.email) && - Objects.equals(metadata, that.metadata) && - Arrays.equals(password, that.password) && - refreshPolicy == that.refreshPolicy; + final PutUserRequest that = (PutUserRequest) o; + return Objects.equals(user, that.user) + && Arrays.equals(password, that.password) + && enabled == that.enabled + && refreshPolicy == that.refreshPolicy; } @Override public int hashCode() { - int result = Objects.hash(username, roles, fullName, email, metadata, enabled, refreshPolicy); + int result = Objects.hash(user, enabled, refreshPolicy); result = 31 * result + Arrays.hashCode(password); return result; } @Override public Optional validate() { - if (metadata != null && metadata.keySet().stream().anyMatch(s -> s.startsWith("_"))) { + if (user.getMetadata() != null && user.getMetadata().keySet().stream().anyMatch(s -> s.startsWith("_"))) { ValidationException validationException = new ValidationException(); - validationException.addValidationError("metadata keys may not start with [_]"); + validationException.addValidationError("user metadata keys may not start with [_]"); return Optional.of(validationException); } return Optional.empty(); @@ -140,7 +106,7 @@ public Optional validate() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("username", username); + builder.field("username", user.getUsername()); if (password != null) { byte[] charBytes = CharArrays.toUtf8Bytes(password); try { @@ -149,18 +115,15 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws Arrays.fill(charBytes, (byte) 0); } } - if (roles != null) { - builder.field("roles", roles); - } - if (fullName != null) { - builder.field("full_name", fullName); - } - if (email != null) { - builder.field("email", email); + builder.field("roles", user.getRoles()); + if (user.getFullName() != 
null) { + builder.field("full_name", user.getFullName()); } - if (metadata != null) { - builder.field("metadata", metadata); + if (user.getEmail() != null) { + builder.field("email", user.getEmail()); } + builder.field("metadata", user.getMetadata()); + builder.field("enabled", enabled); return builder.endObject(); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java new file mode 100644 index 0000000000000..22b9e8220e743 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.NodesResponse; +import org.elasticsearch.client.NodesResponseHeader; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; + +import java.util.List; + +/** + * Base class for security responses that are node responses. Security uses a common pattern in the + * response so this class is present to avoid duplication. 
+ */ +public abstract class SecurityNodesResponse extends NodesResponse { + + private final List nodes; + + SecurityNodesResponse(List nodes, NodesResponseHeader header, String clusterName) { + super(header, clusterName); + this.nodes = nodes; + } + + /** returns a list of nodes in which the cache was cleared */ + public List getNodes() { + return nodes; + } + + public static class Node { + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("clear_roles_cache_response_node", false, + (args, id) -> new ClearRolesCacheResponse.Node(id, (String) args[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); + } + + private final String id; + private final String name; + + public Node(String id, String name) { + this.id = id; + this.name = name; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + } + + public static void declareCommonNodesResponseParsing(ConstructingObjectParser parser) { + parser.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Node.PARSER.apply(p, n), + new ParseField("nodes")); + NodesResponse.declareCommonNodesResponseParsing(parser); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java index 977780b46b79b..ba6cd5f2f8ef5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java @@ -24,38 +24,59 @@ import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Map; import java.util.Objects; +import java.util.Set; /** - * An authenticated user + * A user to be utilized with security APIs. + * Can be an existing authenticated user or it can be a new user to be enrolled to the native realm. */ public final class User { private final String username; - private final Collection roles; + private final Set roles; private final Map metadata; @Nullable private final String fullName; @Nullable private final String email; + /** + * Builds the user to be utilized with security APIs. + * + * @param username the username, also known as the principal, unique for in the scope of a realm + * @param roles the roles that this user is assigned + * @param metadata a map of additional user attributes that may be used in templating roles + * @param fullName the full name of the user that may be used for display purposes + * @param email the email address of the user + */ public User(String username, Collection roles, Map metadata, @Nullable String fullName, @Nullable String email) { - Objects.requireNonNull(username, "`username` cannot be null"); - Objects.requireNonNull(roles, "`roles` cannot be null. Pass an empty collection instead."); - Objects.requireNonNull(roles, "`metadata` cannot be null. Pass an empty map instead."); - this.username = username; - this.roles = roles; - this.metadata = Collections.unmodifiableMap(metadata); + this.username = username = Objects.requireNonNull(username, "`username` is required, cannot be null"); + this.roles = Collections.unmodifiableSet(new HashSet<>( + Objects.requireNonNull(roles, "`roles` is required, cannot be null. Pass an empty Collection instead."))); + this.metadata = Collections + .unmodifiableMap(Objects.requireNonNull(metadata, "`metadata` is required, cannot be null. 
Pass an empty map instead.")); this.fullName = fullName; this.email = email; } + /** + * Builds the user to be utilized with security APIs. + * + * @param username the username, also known as the principal, unique for in the scope of a realm + * @param roles the roles that this user is assigned + */ + public User(String username, Collection roles) { + this(username, roles, Collections.emptyMap(), null, null); + } + /** * @return The principal of this user - effectively serving as the * unique identity of the user. Can never be {@code null}. */ - public String username() { + public String getUsername() { return this.username; } @@ -64,28 +85,28 @@ public String username() { * identified by their unique names and each represents as * set of permissions. Can never be {@code null}. */ - public Collection roles() { + public Set getRoles() { return this.roles; } /** * @return The metadata that is associated with this user. Can never be {@code null}. */ - public Map metadata() { + public Map getMetadata() { return metadata; } /** * @return The full name of this user. May be {@code null}. */ - public @Nullable String fullName() { + public @Nullable String getFullName() { return fullName; } /** * @return The email of this user. May be {@code null}. */ - public @Nullable String email() { + public @Nullable String getEmail() { return email; } @@ -103,28 +124,14 @@ public String toString() { @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o instanceof User == false) { - return false; - } - - final User user = (User) o; - - if (!username.equals(user.username)) { - return false; - } - if (!roles.equals(user.roles)) { - return false; - } - if (!metadata.equals(user.metadata)) { - return false; - } - if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) { - return false; - } - return !(email != null ? !email.equals(user.email) : user.email != null); + if (this == o) return true; + if (o == null || this.getClass() != o.getClass()) return false; + final User that = (User) o; + return Objects.equals(username, that.username) + && Objects.equals(roles, that.roles) + && Objects.equals(metadata, that.metadata) + && Objects.equals(fullName, that.fullName) + && Objects.equals(email, that.email); } @Override diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivileges.java new file mode 100644 index 0000000000000..8846e259e26b3 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivileges.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security.user.privileges;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Represents privileges over resources that are scoped under an application.
+ * The application, resources and privileges are completely managed by the
+ * client and can be arbitrary string identifiers. Elasticsearch is not
+ * concerned with any resources under an application scope.
+ */
+public final class ApplicationResourcePrivileges implements ToXContentObject {
+
+    private static final ParseField APPLICATION = new ParseField("application");
+    private static final ParseField PRIVILEGES = new ParseField("privileges");
+    private static final ParseField RESOURCES = new ParseField("resources");
+
+    @SuppressWarnings("unchecked")
+    static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+            "application_privileges", false, constructorObjects -> {
+                // Don't ignore unknown fields. It is dangerous if the object we parse is also
+                // part of a request that we build later on, and the fields that we now ignore will
+                // end up being implicitly set to null in that request.
+                int i = 0;
+                final String application = (String) constructorObjects[i++];
+                final Collection privileges = (Collection) constructorObjects[i++];
+                final Collection resources = (Collection) constructorObjects[i];
+                return new ApplicationResourcePrivileges(application, privileges, resources);
+            });
+
+    static {
+        PARSER.declareString(constructorArg(), APPLICATION);
+        PARSER.declareStringArray(constructorArg(), PRIVILEGES);
+        PARSER.declareStringArray(constructorArg(), RESOURCES);
+    }
+
+    private final String application;
+    private final Set privileges;
+    private final Set resources;
+
+    /**
+     * Constructs privileges for resources under an application scope.
+     *
+     * @param application
+     *            The application name. This identifier is completely under the
+     *            client's control.
+     * @param privileges
+     *            The privilege names. Cannot be null or empty. Privilege
+     *            identifiers are completely under the client's control.
+     * @param resources
+     *            The resource names. Cannot be null or empty. Resource identifiers
+     *            are completely under the client's control.
+ */ + public ApplicationResourcePrivileges(String application, Collection privileges, Collection resources) { + if (Strings.isNullOrEmpty(application)) { + throw new IllegalArgumentException("application privileges must have an application name"); + } + if (null == privileges || privileges.isEmpty()) { + throw new IllegalArgumentException("application privileges must define at least one privilege"); + } + if (null == resources || resources.isEmpty()) { + throw new IllegalArgumentException("application privileges must refer to at least one resource"); + } + this.application = application; + this.privileges = Collections.unmodifiableSet(new HashSet<>(privileges)); + this.resources = Collections.unmodifiableSet(new HashSet<>(resources)); + } + + public String getApplication() { + return application; + } + + public Set getResources() { + return this.resources; + } + + public Set getPrivileges() { + return this.privileges; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || this.getClass() != o.getClass()) { + return false; + } + ApplicationResourcePrivileges that = (ApplicationResourcePrivileges) o; + return application.equals(that.application) + && privileges.equals(that.privileges) + && resources.equals(that.resources); + } + + @Override + public int hashCode() { + return Objects.hash(application, privileges, resources); + } + + @Override + public String toString() { + try { + return XContentHelper.toXContent(this, XContentType.JSON, true).utf8ToString(); + } catch (IOException e) { + throw new RuntimeException("Unexpected", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(APPLICATION.getPreferredName(), application); + builder.field(PRIVILEGES.getPreferredName(), privileges); + builder.field(RESOURCES.getPreferredName(), resources); + return builder.endObject(); + } + + public static ApplicationResourcePrivileges fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + +} \ No newline at end of file diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalOperationPrivilege.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalOperationPrivilege.java new file mode 100644 index 0000000000000..507d6a5a1956f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalOperationPrivilege.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client.security.user.privileges;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Represents generic global cluster privileges that can be scoped by categories
+ * and then further by operations. The privilege's syntactic and semantic
+ * meaning is specific to each category and operation; there is no general
+ * definition template. It is not permitted to define different privileges under
+ * the same category and operation.
+ */
+public class GlobalOperationPrivilege {
+
+    private final String category;
+    private final String operation;
+    private final Map privilege;
+
+    /**
+     * Constructs privileges under a specific {@code category} and for some
+     * {@code operation}. The privilege definition is flexible; it is a {@code Map}
+     * whose semantics are bound to the {@code category} and {@code operation}.
+     *
+     * @param category
+     *            The category of the privilege.
+     * @param operation
+     *            The operation of the privilege.
+     * @param privilege
+     *            The privilege definition.
+     */
+    public GlobalOperationPrivilege(String category, String operation, Map privilege) {
+        this.category = Objects.requireNonNull(category);
+        this.operation = Objects.requireNonNull(operation);
+        if (privilege == null || privilege.isEmpty()) {
+            throw new IllegalArgumentException("Privileges cannot be empty or null");
+        }
+        this.privilege = Collections.unmodifiableMap(privilege);
+    }
+
+    public String getCategory() {
+        return category;
+    }
+
+    public String getOperation() {
+        return operation;
+    }
+
+    public Map getRaw() {
+        return privilege;
+    }
+
+    public static GlobalOperationPrivilege fromXContent(String category, String operation, XContentParser parser) throws IOException {
+        // parser is still placed on the field name, advance to next token (field value)
+        assert parser.currentToken().equals(XContentParser.Token.FIELD_NAME);
+        parser.nextToken();
+        return new GlobalOperationPrivilege(category, operation, parser.map());
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || (false == (o instanceof GlobalOperationPrivilege))) {
+            return false;
+        }
+        final GlobalOperationPrivilege that = (GlobalOperationPrivilege) o;
+        return category.equals(that.category) && operation.equals(that.operation) && privilege.equals(that.privilege);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(category, operation, privilege);
+    }
+
+}
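A sketch of the API above, for illustration only; the category/operation pair and the map shape mirror the "manage applications" case introduced later, and the application pattern is invented:

```java
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.client.security.user.privileges.GlobalOperationPrivilege;

public class GlobalOperationPrivilegeSketch {
    public static void main(String[] args) {
        // The (category, operation) pair is defined by Elasticsearch; the map's
        // shape is specific to that pair and is otherwise opaque to the client.
        Map<String, Object> definition =
            Collections.<String, Object>singletonMap("applications", Collections.singleton("myapp-*"));
        GlobalOperationPrivilege privilege = new GlobalOperationPrivilege("application", "manage", definition);
        System.out.println(privilege.getCategory() + "/" + privilege.getOperation() + " -> " + privilege.getRaw());
    }
}
```

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalPrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalPrivileges.java
new file mode 100644
index 0000000000000..891980765427e
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/GlobalPrivileges.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.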
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security.user.privileges;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+/**
+ * Represents global privileges. "Global privilege" is an umbrella term for
+ * granular, generic cluster privileges. These privileges are organized into
+ * categories. Elasticsearch defines the set of categories. Under each category
+ * there are operations that are under the client's jurisdiction. A privilege is
+ * hence defined under an operation within a category.
+ */
+public final class GlobalPrivileges implements ToXContentObject {
+
+    // When categories change, adapting this field should suffice. Categories are NOT
+    // opaque "named_objects"; we wish to maintain control over these namespaces.
+    static final List CATEGORIES = Collections.unmodifiableList(Arrays.asList("application"));
+
+    @SuppressWarnings("unchecked")
+    static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("global_category_privileges",
+            false, constructorObjects -> {
+                // ignore_unknown_fields is irrelevant here anyway, but keep it false
+                // because this conveys strictness.
+                return new GlobalPrivileges((Collection) constructorObjects[0]);
+            });
+
+    static {
+        for (final String category : CATEGORIES) {
+            PARSER.declareNamedObjects(optionalConstructorArg(),
+                    (parser, context, operation) -> GlobalOperationPrivilege.fromXContent(category, operation, parser),
+                    new ParseField(category));
+        }
+    }
+
+    private final Set privileges;
+    // same data as in privileges but broken down by categories; internally, it is
+    // easier to work with this structure
+    private final Map<String, List<GlobalOperationPrivilege>> privilegesByCategoryMap;
+
+    /**
+     * Constructs global privileges by bundling the set of privileges.
+     *
+     * @param privileges
+     *            The privileges under a category and for an operation in that category.
+ */
+    public GlobalPrivileges(Collection privileges) {
+        if (privileges == null || privileges.isEmpty()) {
+            throw new IllegalArgumentException("Privileges cannot be empty or null");
+        }
+        // duplicates are just ignored
+        this.privileges = Collections.unmodifiableSet(new HashSet<>(Objects.requireNonNull(privileges)));
+        this.privilegesByCategoryMap = Collections
+                .unmodifiableMap(this.privileges.stream().collect(Collectors.groupingBy(GlobalOperationPrivilege::getCategory)));
+        for (final Map.Entry<String, List<GlobalOperationPrivilege>> privilegesByCategory : privilegesByCategoryMap.entrySet()) {
+            // all operations for a specific category
+            final Set allOperations = privilegesByCategory.getValue().stream().map(p -> p.getOperation())
+                    .collect(Collectors.toSet());
+            if (allOperations.size() != privilegesByCategory.getValue().size()) {
+                throw new IllegalArgumentException("Different privileges for the same category and operation are not permitted");
+            }
+        }
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        for (final Map.Entry<String, List<GlobalOperationPrivilege>> privilegesByCategory : this.privilegesByCategoryMap.entrySet()) {
+            builder.startObject(privilegesByCategory.getKey());
+            for (final GlobalOperationPrivilege privilege : privilegesByCategory.getValue()) {
+                builder.field(privilege.getOperation(), privilege.getRaw());
+            }
+            builder.endObject();
+        }
+        return builder.endObject();
+    }
+
+    public static GlobalPrivileges fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    public Set getPrivileges() {
+        return privileges;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || this.getClass() != o.getClass()) {
+            return false;
+        }
+        final GlobalPrivileges that = (GlobalPrivileges) o;
+        return privileges.equals(that.privileges);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(privileges);
+    }
+
+}
\ No newline at end of file
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java
new file mode 100644
index 0000000000000..e693a4fea34fa
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/IndicesPrivileges.java
@@ -0,0 +1,309 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security.user.privileges;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+/**
+ * Represents privileges over indices. There is a canonical set of privilege
+ * names (e.g. {@code IndicesPrivileges#READ_PRIVILEGE_NAME}) but there is
+ * flexibility in the definition of finer-grained, more specialized privileges.
+ * This also encapsulates field and document level security privileges. These
+ * control which fields or documents are readable or queryable.
+ */
+public final class IndicesPrivileges implements ToXContentObject {
+
+    public static final ParseField NAMES = new ParseField("names");
+    public static final ParseField PRIVILEGES = new ParseField("privileges");
+    public static final ParseField FIELD_PERMISSIONS = new ParseField("field_security");
+    public static final ParseField GRANT_FIELDS = new ParseField("grant");
+    public static final ParseField EXCEPT_FIELDS = new ParseField("except");
+    public static final ParseField QUERY = new ParseField("query");
+
+    @SuppressWarnings("unchecked")
+    static final ConstructingObjectParser PARSER =
+            new ConstructingObjectParser<>("indices_privileges", false, constructorObjects -> {
+                int i = 0;
+                final Collection indices = (Collection) constructorObjects[i++];
+                final Collection privileges = (Collection) constructorObjects[i++];
+                final Tuple<Collection<String>, Collection<String>> fields =
+                        (Tuple<Collection<String>, Collection<String>>) constructorObjects[i++];
+                final Collection grantFields = fields != null ? fields.v1() : null;
+                final Collection exceptFields = fields != null ? fields.v2() : null;
+                final String query = (String) constructorObjects[i];
+                return new IndicesPrivileges(indices, privileges, grantFields, exceptFields, query);
+            });
+
+    static {
+        @SuppressWarnings("unchecked")
+        final ConstructingObjectParser<Tuple<Collection<String>, Collection<String>>, Void> flsParser =
+                new ConstructingObjectParser<>("field_level_parser", false, constructorObjects -> {
+                    int i = 0;
+                    final Collection grantFields = (Collection) constructorObjects[i++];
+                    final Collection exceptFields = (Collection) constructorObjects[i];
+                    return new Tuple<>(grantFields, exceptFields);
+                });
+        flsParser.declareStringArray(optionalConstructorArg(), GRANT_FIELDS);
+        flsParser.declareStringArray(optionalConstructorArg(), EXCEPT_FIELDS);
+
+        PARSER.declareStringArray(constructorArg(), NAMES);
+        PARSER.declareStringArray(constructorArg(), PRIVILEGES);
+        PARSER.declareObject(optionalConstructorArg(), flsParser, FIELD_PERMISSIONS);
+        PARSER.declareStringOrNull(optionalConstructorArg(), QUERY);
+    }
+
+    private final Set indices;
+    private final Set privileges;
+    // null or singleton '*' means all fields are granted, empty means no fields are granted
+    private final @Nullable Set grantedFields;
+    // null or empty means no fields are denied
+    private final @Nullable Set deniedFields;
+    // missing query means all documents, i.e. no restrictions
+    private final @Nullable String query;
+
+    private IndicesPrivileges(Collection indices, Collection privileges, @Nullable Collection grantedFields,
+            @Nullable Collection deniedFields, @Nullable String query) {
+        if (null == indices || indices.isEmpty()) {
+            throw new IllegalArgumentException("indices privileges must refer to at least one index name or index name pattern");
+        }
+        if (null == privileges || privileges.isEmpty()) {
+            throw new IllegalArgumentException("indices privileges must define at least one privilege");
+        }
+        this.indices = Collections.unmodifiableSet(new HashSet<>(indices));
+        this.privileges = Collections.unmodifiableSet(new HashSet<>(privileges));
+        // unspecified granted fields means no restriction
+        this.grantedFields = grantedFields == null ? null : Collections.unmodifiableSet(new HashSet<>(grantedFields));
+        // unspecified denied fields means no restriction
+        this.deniedFields = deniedFields == null ? null : Collections.unmodifiableSet(new HashSet<>(deniedFields));
+        this.query = query;
+    }
+
+    /**
+     * The index names covered by the privileges.
+     */
+    public Set getIndices() {
+        return this.indices;
+    }
+
+    /**
+     * The privileges acting over indices. There is a canonical predefined set of
+     * such privileges, but the {@code String} datatype allows for flexibility in
+     * defining finer-grained privileges.
+     */
+    public Set getPrivileges() {
+        return this.privileges;
+    }
+
+    /**
+     * The document fields that can be read or queried. Can be null, in which case
+     * access is granted to all of the document's fields. Can also be empty, in
+     * which case no fields are accessible.
+     */
+    public @Nullable Set getGrantedFields() {
+        return this.grantedFields;
+    }
+
+    /**
+     * The document fields that cannot be accessed or queried. Can be null or empty,
+     * in which case no fields are denied.
+     */
+    public @Nullable Set getDeniedFields() {
+        return this.deniedFields;
+    }
+
+    /**
+     * A query limiting the visible documents in the indices. Can be null, in which
+     * case all documents are visible.
+     */
+    public @Nullable String getQuery() {
+        return this.query;
+    }
+
+    /**
+     * If {@code true} some documents might not be visible.
Only the documents + * matching {@code query} will be readable. + */ + public boolean isUsingDocumentLevelSecurity() { + return query != null; + } + + /** + * If {@code true} some document fields might not be visible. + */ + public boolean isUsingFieldLevelSecurity() { + return limitsGrantedFields() || hasDeniedFields(); + } + + private boolean hasDeniedFields() { + return deniedFields != null && false == deniedFields.isEmpty(); + } + + private boolean limitsGrantedFields() { + // we treat just '*' as no FLS since that's what the UI defaults to + if (grantedFields == null || (grantedFields.size() == 1 && grantedFields.iterator().next().equals("*"))) { + return false; + } + return true; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + IndicesPrivileges that = (IndicesPrivileges) o; + return indices.equals(that.indices) + && privileges.equals(that.privileges) + && Objects.equals(grantedFields, that.grantedFields) + && Objects.equals(deniedFields, that.deniedFields) + && Objects.equals(query, that.query); + } + + @Override + public int hashCode() { + return Objects.hash(indices, privileges, grantedFields, deniedFields, query); + } + + @Override + public String toString() { + try { + return XContentHelper.toXContent(this, XContentType.JSON, true).utf8ToString(); + } catch (IOException e) { + throw new RuntimeException("Unexpected", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NAMES.getPreferredName(), indices); + builder.field(PRIVILEGES.getPreferredName(), privileges); + if (isUsingFieldLevelSecurity()) { + builder.startObject(FIELD_PERMISSIONS.getPreferredName()); + if (grantedFields != null) { + builder.field(GRANT_FIELDS.getPreferredName(), grantedFields); + } + if (hasDeniedFields()) { + builder.field(EXCEPT_FIELDS.getPreferredName(), deniedFields); + } + builder.endObject(); + } + if (isUsingDocumentLevelSecurity()) { + builder.field("query", query); + } + return builder.endObject(); + } + + public static IndicesPrivileges fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + + private @Nullable Collection indices = null; + private @Nullable Collection privileges = null; + private @Nullable Collection grantedFields = null; + private @Nullable Collection deniedFields = null; + private @Nullable String query = null; + + private Builder() { + } + + public Builder indices(String... indices) { + return indices(Arrays.asList(Objects.requireNonNull(indices, "indices required"))); + } + + public Builder indices(Collection indices) { + this.indices = Objects.requireNonNull(indices, "indices required"); + return this; + } + + public Builder privileges(String... privileges) { + return privileges(Arrays.asList(Objects.requireNonNull(privileges, "privileges required"))); + } + + public Builder privileges(Collection privileges) { + this.privileges = Objects.requireNonNull(privileges, "privileges required"); + return this; + } + + public Builder grantedFields(@Nullable String... 
grantedFields) { + if (grantedFields == null) { + this.grantedFields = null; + return this; + } + return grantedFields(Arrays.asList(grantedFields)); + } + + public Builder grantedFields(@Nullable Collection grantedFields) { + this.grantedFields = grantedFields; + return this; + } + + public Builder deniedFields(@Nullable String... deniedFields) { + if (deniedFields == null) { + this.deniedFields = null; + return this; + } + return deniedFields(Arrays.asList(deniedFields)); + } + + public Builder deniedFields(@Nullable Collection deniedFields) { + this.deniedFields = deniedFields; + return this; + } + + public Builder query(@Nullable String query) { + this.query = query; + return this; + } + + public IndicesPrivileges build() { + return new IndicesPrivileges(indices, privileges, grantedFields, deniedFields, query); + } + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ManageApplicationPrivilege.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ManageApplicationPrivilege.java new file mode 100644 index 0000000000000..9356c2ef0e867 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/ManageApplicationPrivilege.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security.user.privileges; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; + +/** + * Represents the privilege to "manage" certain applications. The "manage" + * privilege is actually defined outside of Elasticsearch. 
+ */
+public class ManageApplicationPrivilege extends GlobalOperationPrivilege {
+
+    private static final String CATEGORY = "application";
+    private static final String OPERATION = "manage";
+    private static final String KEY = "applications";
+
+    public ManageApplicationPrivilege(Collection applications) {
+        super(CATEGORY, OPERATION, Collections.singletonMap(KEY, new HashSet<>(Objects.requireNonNull(applications))));
+    }
+
+    @SuppressWarnings("unchecked")
+    public Set getManagedApplications() {
+        return (Set) getRaw().get(KEY);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        return super.equals(o);
+    }
+
+    @Override
+    public int hashCode() {
+        return super.hashCode();
+    }
+}
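A sketch, assuming the classes above, of how this subclass composes with GlobalPrivileges; the application names are invented:

```java
import java.util.Arrays;
import org.elasticsearch.client.security.user.privileges.GlobalPrivileges;
import org.elasticsearch.client.security.user.privileges.ManageApplicationPrivilege;

public class GlobalPrivilegesSketch {
    public static void main(String[] args) {
        // "manage" over two (invented) applications, bundled under the "application" category
        ManageApplicationPrivilege manage = new ManageApplicationPrivilege(Arrays.asList("myapp", "myapp-admin"));
        GlobalPrivileges global = new GlobalPrivileges(Arrays.asList(manage));
        System.out.println(global.getPrivileges().size() + " global privilege(s): " + manage.getManagedApplications());
    }
}
```

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
new file mode 100644
index 0000000000000..78265196ee819
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/privileges/Role.java
@@ -0,0 +1,310 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security.user.privileges;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
+
+/**
+ * Represents an aggregation of privileges. This does not have a name
+ * identifier.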
+ */ +public final class Role implements ToXContentObject { + + public static final ParseField CLUSTER = new ParseField("cluster"); + public static final ParseField GLOBAL = new ParseField("global"); + public static final ParseField INDICES = new ParseField("indices"); + public static final ParseField APPLICATIONS = new ParseField("applications"); + public static final ParseField RUN_AS = new ParseField("run_as"); + public static final ParseField METADATA = new ParseField("metadata"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("role_descriptor", false, + constructorObjects -> { + // Don't ignore unknown fields. It is dangerous if the object we parse is also + // part of a request that we build later on, and the fields that we now ignore + // will end up being implicitly set to null in that request. + int i = 0; + final Collection clusterPrivileges = (Collection) constructorObjects[i++]; + final GlobalPrivileges globalApplicationPrivileges = (GlobalPrivileges) constructorObjects[i++]; + final Collection indicesPrivileges = (Collection) constructorObjects[i++]; + final Collection applicationResourcePrivileges = + (Collection) constructorObjects[i++]; + final Collection runAsPrivilege = (Collection) constructorObjects[i++]; + final Map metadata = (Map) constructorObjects[i]; + return new Role(clusterPrivileges, globalApplicationPrivileges, indicesPrivileges, applicationResourcePrivileges, + runAsPrivilege, metadata); + }); + + static { + PARSER.declareStringArray(optionalConstructorArg(), CLUSTER); + PARSER.declareObject(optionalConstructorArg(), GlobalPrivileges.PARSER, GLOBAL); + PARSER.declareFieldArray(optionalConstructorArg(), IndicesPrivileges.PARSER, INDICES, ValueType.OBJECT_ARRAY); + PARSER.declareFieldArray(optionalConstructorArg(), ApplicationResourcePrivileges.PARSER, APPLICATIONS, ValueType.OBJECT_ARRAY); + PARSER.declareStringArray(optionalConstructorArg(), RUN_AS); + PARSER.declareObject(constructorArg(), (parser, c) -> parser.map(), METADATA); + } + + private final Set clusterPrivileges; + private final @Nullable GlobalPrivileges globalApplicationPrivileges; + private final Set indicesPrivileges; + private final Set applicationResourcePrivileges; + private final Set runAsPrivilege; + private final Map metadata; + + private Role(@Nullable Collection clusterPrivileges, @Nullable GlobalPrivileges globalApplicationPrivileges, + @Nullable Collection indicesPrivileges, + @Nullable Collection applicationResourcePrivileges, @Nullable Collection runAsPrivilege, + @Nullable Map metadata) { + // no cluster privileges are granted unless otherwise specified + this.clusterPrivileges = Collections + .unmodifiableSet(clusterPrivileges != null ? new HashSet<>(clusterPrivileges) : Collections.emptySet()); + this.globalApplicationPrivileges = globalApplicationPrivileges; + // no indices privileges are granted unless otherwise specified + this.indicesPrivileges = Collections + .unmodifiableSet(indicesPrivileges != null ? new HashSet<>(indicesPrivileges) : Collections.emptySet()); + // no application resource privileges are granted unless otherwise specified + this.applicationResourcePrivileges = Collections.unmodifiableSet( + applicationResourcePrivileges != null ? new HashSet<>(applicationResourcePrivileges) : Collections.emptySet()); + // no run as privileges are granted unless otherwise specified + this.runAsPrivilege = Collections.unmodifiableSet(runAsPrivilege != null ? 
new HashSet<>(runAsPrivilege) : Collections.emptySet()); + this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); + } + + public Set getClusterPrivileges() { + return clusterPrivileges; + } + + public GlobalPrivileges getGlobalApplicationPrivileges() { + return globalApplicationPrivileges; + } + + public Set getIndicesPrivileges() { + return indicesPrivileges; + } + + public Set getApplicationResourcePrivileges() { + return applicationResourcePrivileges; + } + + public Set getRunAsPrivilege() { + return runAsPrivilege; + } + + public Map getMetadata() { + return metadata; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Role that = (Role) o; + return clusterPrivileges.equals(that.clusterPrivileges) + && Objects.equals(globalApplicationPrivileges, that.globalApplicationPrivileges) + && indicesPrivileges.equals(that.indicesPrivileges) + && applicationResourcePrivileges.equals(that.applicationResourcePrivileges) + && runAsPrivilege.equals(that.runAsPrivilege) + && metadata.equals(that.metadata); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPrivileges, globalApplicationPrivileges, indicesPrivileges, applicationResourcePrivileges, + runAsPrivilege, metadata); + } + + @Override + public String toString() { + try { + return XContentHelper.toXContent(this, XContentType.JSON, true).utf8ToString(); + } catch (IOException e) { + throw new RuntimeException("Unexpected", e); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (false == clusterPrivileges.isEmpty()) { + builder.field(CLUSTER.getPreferredName(), clusterPrivileges); + } + if (null != globalApplicationPrivileges) { + builder.field(GLOBAL.getPreferredName(), globalApplicationPrivileges); + } + if (false == indicesPrivileges.isEmpty()) { + builder.field(INDICES.getPreferredName(), indicesPrivileges); + } + if (false == applicationResourcePrivileges.isEmpty()) { + builder.field(APPLICATIONS.getPreferredName(), applicationResourcePrivileges); + } + if (false == runAsPrivilege.isEmpty()) { + builder.field(RUN_AS.getPreferredName(), runAsPrivilege); + } + if (false == metadata.isEmpty()) { + builder.field(METADATA.getPreferredName(), metadata); + } + return builder.endObject(); + } + + public static Role fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static Builder builder() { + return new Builder(); + } + + public static final class Builder { + + private @Nullable Collection clusterPrivileges = null; + private @Nullable GlobalPrivileges globalApplicationPrivileges = null; + private @Nullable Collection indicesPrivileges = null; + private @Nullable Collection applicationResourcePrivileges = null; + private @Nullable Collection runAsPrivilege = null; + private @Nullable Map metadata = null; + + private Builder() { + } + + public Builder clusterPrivileges(String... clusterPrivileges) { + return clusterPrivileges(Arrays + .asList(Objects.requireNonNull(clusterPrivileges, "Cluster privileges cannot be null. Pass an empty array instead."))); + } + + public Builder clusterPrivileges(Collection clusterPrivileges) { + this.clusterPrivileges = Objects.requireNonNull(clusterPrivileges, + "Cluster privileges cannot be null. 
Pass an empty collection instead.");
+            return this;
+        }
+
+        public Builder globalApplicationPrivileges(GlobalPrivileges globalApplicationPrivileges) {
+            this.globalApplicationPrivileges = globalApplicationPrivileges;
+            return this;
+        }
+
+        public Builder indicesPrivileges(IndicesPrivileges... indicesPrivileges) {
+            return indicesPrivileges(Arrays
+                    .asList(Objects.requireNonNull(indicesPrivileges, "Indices privileges cannot be null. Pass an empty array instead.")));
+        }
+
+        public Builder indicesPrivileges(Collection indicesPrivileges) {
+            this.indicesPrivileges = Objects.requireNonNull(indicesPrivileges,
+                    "Indices privileges cannot be null. Pass an empty collection instead.");
+            return this;
+        }
+
+        public Builder applicationResourcePrivileges(ApplicationResourcePrivileges... applicationResourcePrivileges) {
+            return applicationResourcePrivileges(Arrays.asList(Objects.requireNonNull(applicationResourcePrivileges,
+                    "Application resource privileges cannot be null. Pass an empty array instead.")));
+        }
+
+        public Builder applicationResourcePrivileges(Collection applicationResourcePrivileges) {
+            this.applicationResourcePrivileges = Objects.requireNonNull(applicationResourcePrivileges,
+                    "Application resource privileges cannot be null. Pass an empty collection instead.");
+            return this;
+        }
+
+        public Builder runAsPrivilege(String... runAsPrivilege) {
+            return runAsPrivilege(Arrays
+                    .asList(Objects.requireNonNull(runAsPrivilege, "Run as privilege cannot be null. Pass an empty array instead.")));
+        }
+
+        public Builder runAsPrivilege(Collection runAsPrivilege) {
+            this.runAsPrivilege = Objects.requireNonNull(runAsPrivilege,
+                    "Run as privilege cannot be null. Pass an empty collection instead.");
+            return this;
+        }
+
+        public Builder metadata(Map metadata) {
+            this.metadata = Objects.requireNonNull(metadata, "Metadata cannot be null. Pass an empty map instead.");
+            return this;
+        }
+
+        public Role build() {
+            return new Role(clusterPrivileges, globalApplicationPrivileges, indicesPrivileges, applicationResourcePrivileges,
+                    runAsPrivilege, metadata);
+        }
+    }
+
+    /**
+     * Canonical cluster privilege names. There is no enforcement to use only these.
+     */
+    public static class ClusterPrivilegeName {
+        public static final String NONE = "none";
+        public static final String ALL = "all";
+        public static final String MONITOR = "monitor";
+        public static final String MONITOR_ML = "monitor_ml";
+        public static final String MONITOR_WATCHER = "monitor_watcher";
+        public static final String MONITOR_ROLLUP = "monitor_rollup";
+        public static final String MANAGE = "manage";
+        public static final String MANAGE_ML = "manage_ml";
+        public static final String MANAGE_WATCHER = "manage_watcher";
+        public static final String MANAGE_ROLLUP = "manage_rollup";
+        public static final String MANAGE_INDEX_TEMPLATES = "manage_index_templates";
+        public static final String MANAGE_INGEST_PIPELINES = "manage_ingest_pipelines";
+        public static final String TRANSPORT_CLIENT = "transport_client";
+        public static final String MANAGE_SECURITY = "manage_security";
+        public static final String MANAGE_SAML = "manage_saml";
+        public static final String MANAGE_PIPELINE = "manage_pipeline";
+        public static final String MANAGE_CCR = "manage_ccr";
+        public static final String READ_CCR = "read_ccr";
+    }
+
+    /**
+     * Canonical index privilege names. There is no enforcement to use only these.
+ */
+    public static class IndexPrivilegeName {
+        public static final String NONE = "none";
+        public static final String ALL = "all";
+        public static final String READ = "read";
+        public static final String READ_CROSS = "read_cross_cluster";
+        public static final String CREATE = "create";
+        public static final String INDEX = "index";
+        public static final String DELETE = "delete";
+        public static final String WRITE = "write";
+        public static final String MONITOR = "monitor";
+        public static final String MANAGE = "manage";
+        public static final String DELETE_INDEX = "delete_index";
+        public static final String CREATE_INDEX = "create_index";
+        public static final String VIEW_INDEX_METADATA = "view_index_metadata";
+        public static final String MANAGE_FOLLOW_INDEX = "manage_follow_index";
+    }
+
+}
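For illustration, a sketch assembling a Role from the builders above; the index pattern, application, query and metadata are invented:

```java
import java.util.Collections;
import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges;
import org.elasticsearch.client.security.user.privileges.IndicesPrivileges;
import org.elasticsearch.client.security.user.privileges.Role;

public class RoleSketch {
    public static void main(String[] args) {
        Role role = Role.builder()
            .clusterPrivileges(Role.ClusterPrivilegeName.MONITOR)
            .indicesPrivileges(IndicesPrivileges.builder()
                .indices("logs-*")
                .privileges(Role.IndexPrivilegeName.READ)
                .grantedFields("message", "@timestamp")     // field-level security
                .query("{\"term\": {\"public\": true}}")    // document-level security
                .build())
            .applicationResourcePrivileges(new ApplicationResourcePrivileges(
                "myapp", Collections.singleton("read"), Collections.singleton("resource/*")))
            .runAsPrivilege("other_user")
            .metadata(Collections.<String, Object>singletonMap("version", 1))
            .build();
        // toString renders the role as JSON via toXContent
        System.out.println(role);
    }
}
```

diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/TaskSubmissionResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/TaskSubmissionResponse.java
new file mode 100644
index 0000000000000..7bc104c9bbf27
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/TaskSubmissionResponse.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.tasks;
+
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class TaskSubmissionResponse extends ActionResponse {
+
+    private static final ParseField TASK = new ParseField("task");
+
+    public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+        "task_submission_response",
+        true, a -> new TaskSubmissionResponse((String) a[0]));
+
+    static {
+        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TASK);
+    }
+
+    private final String task;
+
+    TaskSubmissionResponse(String task) {
+        this.task = task;
+    }
+
+    /**
+     * Get the task id.
+     *
+     * @return the id of the reindex task.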
+ */ + public String getTask() { + return task; + } + + @Override + public int hashCode() { + return Objects.hash(task); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + TaskSubmissionResponse that = (TaskSubmissionResponse) other; + return Objects.equals(task, that.task); + } + + public static TaskSubmissionResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index cc179e12e3163..a9214e9333c4e 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -22,7 +22,6 @@ org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameV @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.elasticsearch.common.logging.DeprecationLogger -org.elasticsearch.common.logging.ESLoggerFactory org.elasticsearch.common.logging.LogConfigurator org.elasticsearch.common.logging.LoggerMessageFormat org.elasticsearch.common.logging.Loggers diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 1dd27cff0d92a..fed0e8921569c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -60,8 +60,6 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.index.reindex.ReindexAction; -import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.UpdateByQueryAction; import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; @@ -706,111 +704,6 @@ public void testBulk() throws IOException { validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest); } - public void testReindex() throws Exception { - final String sourceIndex = "source1"; - final String destinationIndex = "dest"; - { - // Prepare - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(sourceIndex, settings); - createIndex(destinationIndex, settings); - BulkRequest bulkRequest = new BulkRequest() - .add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) - .add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE); - assertEquals( - RestStatus.OK, - highLevelClient().bulk( - bulkRequest, - RequestOptions.DEFAULT - ).status() - ); - } - { - // test1: create one doc in dest - ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceIndices(sourceIndex); - reindexRequest.setDestIndex(destinationIndex); - reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); - reindexRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = execute(reindexRequest, 
highLevelClient()::reindex, highLevelClient()::reindexAsync); - assertEquals(1, bulkResponse.getCreated()); - assertEquals(1, bulkResponse.getTotal()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - } - { - // test2: create 1 and update 1 - ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceIndices(sourceIndex); - reindexRequest.setDestIndex(destinationIndex); - BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); - assertEquals(1, bulkResponse.getCreated()); - assertEquals(2, bulkResponse.getTotal()); - assertEquals(1, bulkResponse.getUpdated()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - } - { - // test reindex rethrottling - ReindexRequest reindexRequest = new ReindexRequest(); - reindexRequest.setSourceIndices(sourceIndex); - reindexRequest.setDestIndex(destinationIndex); - - // this following settings are supposed to halt reindexing after first document - reindexRequest.setSourceBatchSize(1); - reindexRequest.setRequestsPerSecond(0.00001f); - final CountDownLatch reindexTaskFinished = new CountDownLatch(1); - highLevelClient().reindexAsync(reindexRequest, RequestOptions.DEFAULT, new ActionListener() { - - @Override - public void onResponse(BulkByScrollResponse response) { - reindexTaskFinished.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail(e.toString()); - } - }); - - TaskId taskIdToRethrottle = findTaskToRethrottle(ReindexAction.NAME); - float requestsPerSecond = 1000f; - ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::reindexRethrottle, highLevelClient()::reindexRethrottleAsync); - assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); - assertEquals(Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - reindexTaskFinished.await(2, TimeUnit.SECONDS); - - // any rethrottling after the reindex is done performed with the same taskId should result in a failure - response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::reindexRethrottle, highLevelClient()::reindexRethrottleAsync); - assertTrue(response.getTasks().isEmpty()); - assertFalse(response.getNodeFailures().isEmpty()); - assertEquals(1, response.getNodeFailures().size()); - assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", - response.getNodeFailures().get(0).getCause().getMessage()); - } - } - private TaskId findTaskToRethrottle(String actionName) throws 
IOException { long start = System.nanoTime(); ListTasksRequest request = new ListTasksRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java index 0030fd0773a78..1af29701bc7c8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java @@ -99,9 +99,9 @@ public void testRemoveIndexLifecyclePolicy() { setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams); Request request = IndexLifecycleRequestConverters.removeIndexLifecyclePolicy(req); - assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); String idxString = Strings.arrayToCommaDelimitedString(indices); - assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm")); + assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm/remove")); assertThat(request.getParameters(), equalTo(expectedParams)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java index 3d6b2d6634f93..ba27bdd3a5bd4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java @@ -84,8 +84,7 @@ public void testXPackInfo() throws IOException { assertNotNull(ml.description()); assertTrue(ml.available()); assertTrue(ml.enabled()); - assertEquals(mainResponse.getVersion().toString(), - ml.nativeCodeInfo().get("version").toString().replace("-SNAPSHOT", "")); + assertEquals(mainResponse.getBuild().getQualifiedVersion(), ml.nativeCodeInfo().get("version").toString()); } public void testXPackInfoEmptyRequest() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java new file mode 100644 index 0000000000000..afc5e99b5f03a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.tasks.TaskSubmissionResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.IdsQueryBuilder; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.Collections; +import java.util.function.BooleanSupplier; + +public class ReindexIT extends ESRestHighLevelClientTestCase { + + public void testReindex() throws IOException { + final String sourceIndex = "source1"; + final String destinationIndex = "dest"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destinationIndex, settings); + BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + bulkRequest, + RequestOptions.DEFAULT + ).status() + ); + } + { + // reindex one document with id 1 from source to destination + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); + reindexRequest.setRefresh(true); + + BulkByScrollResponse bulkResponse = execute(reindexRequest, highLevelClient()::reindex, highLevelClient()::reindexAsync); + + assertEquals(1, bulkResponse.getCreated()); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + } + + public void testReindexTask() throws IOException, InterruptedException { + final String sourceIndex = "source123"; + final String destinationIndex = "dest2"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destinationIndex, settings); + BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + bulkRequest, + RequestOptions.DEFAULT + ).status() + ); + } + { + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destinationIndex); + 
reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); + reindexRequest.setRefresh(true); + + TaskSubmissionResponse reindexSubmission = highLevelClient().submitReindexTask(reindexRequest, RequestOptions.DEFAULT); + + BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(reindexSubmission.getTask()); + awaitBusy(hasUpgradeCompleted); + } + } + + private BooleanSupplier checkCompletionStatus(String taskId) { + return () -> { + try { + Response response = client().performRequest(new Request("GET", "/_tasks/" + taskId)); + return (boolean) entityAsMap(response).get("completed"); + } catch (IOException e) { + fail(e.getMessage()); + return false; + } + }; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 066fb5d8cc903..20419ac314af9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -348,6 +348,7 @@ public void testReindex() throws IOException { setRandomTimeout(reindexRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); setRandomWaitForActiveShards(reindexRequest::setWaitForActiveShards, ActiveShardCount.DEFAULT, expectedParams); expectedParams.put("scroll", reindexRequest.getScrollTime().getStringRep()); + expectedParams.put("wait_for_completion", Boolean.TRUE.toString()); Request request = RequestConverters.reindex(reindexRequest); assertEquals("/_reindex", request.getEndpoint()); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 38810285a5d1c..2c5d279592f48 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -83,6 +83,7 @@ import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.hamcrest.Matchers; import org.junit.Before; import java.io.IOException; @@ -114,6 +115,8 @@ public class RestHighLevelClientTests extends ESTestCase { + private static final String SUBMIT_TASK_PREFIX = "submit_"; + private static final String SUBMIT_TASK_SUFFIX = "_task"; private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1); private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); @@ -625,7 +628,7 @@ public void testDefaultNamedXContents() { public void testProvidedNamedXContents() { List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents(); - assertEquals(16, namedXContents.size()); + assertEquals(17, namedXContents.size()); Map<Class<?>, Integer> categories = new HashMap<>(); List<String> names = new ArrayList<>(); for (NamedXContentRegistry.Entry namedXContent : namedXContents) { @@ -635,8 +638,8 @@ public void testProvidedNamedXContents() { categories.put(namedXContent.categoryClass, counter + 1); } } - assertEquals(4, categories.size()); - assertEquals(Integer.valueOf(2), categories.get(Aggregation.class)); + assertEquals("Had: " + categories, 4,
categories.size()); + assertEquals(Integer.valueOf(3), categories.get(Aggregation.class)); assertTrue(names.contains(ChildrenAggregationBuilder.NAME)); assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME)); assertEquals(Integer.valueOf(4), categories.get(EvaluationMetric.class)); @@ -728,47 +731,11 @@ public void testApiNamingConventions() throws Exception { //we convert all the method names to snake case, hence we need to look for the '_async' suffix rather than 'Async' if (apiName.endsWith("_async")) { - assertTrue("async method [" + method.getName() + "] doesn't have corresponding sync method", - methods.containsKey(apiName.substring(0, apiName.length() - 6))); - assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE)); - assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); - if (apiName.equals("security.authenticate_async") || apiName.equals("security.get_ssl_certificates_async")) { - assertEquals(2, method.getParameterTypes().length); - assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); - assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); - } else { - assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length); - assertThat("the first parameter to async method [" + method + "] should be a request type", - method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); - assertThat("the second parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[1], equalTo(RequestOptions.class)); - assertThat("the third parameter to async method [" + method + "] is the wrong type", - method.getParameterTypes()[2], equalTo(ActionListener.class)); - } + assertAsyncMethod(methods, method, apiName); + } else if (isSubmitTaskMethod(apiName)) { + assertSubmitTaskMethod(methods, method, apiName, restSpec); } else { - //A few methods return a boolean rather than a response object - if (apiName.equals("ping") || apiName.contains("exist")) { - assertThat("the return type for method [" + method + "] is incorrect", - method.getReturnType().getSimpleName(), equalTo("boolean")); - } else { - assertThat("the return type for method [" + method + "] is incorrect", - method.getReturnType().getSimpleName(), endsWith("Response")); - } - - assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); - //a few methods don't accept a request object as argument - if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates") - || apiName.equals("security.authenticate")) { - assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); - assertThat("the parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0], equalTo(RequestOptions.class)); - } else { - assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length); - assertThat("the first parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); - assertThat("the second parameter to method [" + method + "] is the wrong type", - method.getParameterTypes()[1], equalTo(RequestOptions.class)); - } + assertSyncMethod(method, apiName); boolean remove = apiSpec.remove(apiName); if (remove == false) { @@ -785,7 +752,8 @@ 
public void testApiNamingConventions() throws Exception { apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false && apiName.startsWith("security.") == false && - apiName.startsWith("index_lifecycle.") == false) { + apiName.startsWith("index_lifecycle.") == false && + apiName.startsWith("ccr.") == false) { apiNotFound.add(apiName); } } @@ -803,6 +771,74 @@ public void testApiNamingConventions() throws Exception { assertThat("Some API are not supported but they should be: " + apiSpec, apiSpec.size(), equalTo(0)); } + private void assertSyncMethod(Method method, String apiName) { + //A few methods return a boolean rather than a response object + if (apiName.equals("ping") || apiName.contains("exist")) { + assertThat("the return type for method [" + method + "] is incorrect", + method.getReturnType().getSimpleName(), equalTo("boolean")); + } else { + assertThat("the return type for method [" + method + "] is incorrect", + method.getReturnType().getSimpleName(), endsWith("Response")); + } + + assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); + //a few methods don't accept a request object as argument + if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates") + || apiName.equals("security.authenticate")) { + assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); + assertThat("the parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0], equalTo(RequestOptions.class)); + } else { + assertEquals("incorrect number of arguments for method [" + method + "]", 2, method.getParameterTypes().length); + assertThat("the first parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + } + } + + private void assertAsyncMethod(Map methods, Method method, String apiName) { + assertTrue("async method [" + method.getName() + "] doesn't have corresponding sync method", + methods.containsKey(apiName.substring(0, apiName.length() - 6))); + assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE)); + assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length); + if (apiName.equals("security.authenticate_async") || apiName.equals("security.get_ssl_certificates_async")) { + assertEquals(2, method.getParameterTypes().length); + assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class)); + assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); + } else { + assertEquals("async method [" + method + "] has the wrong number of arguments", 3, method.getParameterTypes().length); + assertThat("the first parameter to async method [" + method + "] should be a request type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + assertThat("the third parameter to async method [" + method + "] is the wrong type", + method.getParameterTypes()[2], equalTo(ActionListener.class)); + } + } + + private void assertSubmitTaskMethod(Map methods, Method method, String apiName, 
ClientYamlSuiteRestSpec restSpec) { + String methodName = extractMethodName(apiName); + assertTrue("submit task method [" + method.getName() + "] doesn't have corresponding sync method", + methods.containsKey(methodName)); + assertEquals("submit task method [" + method + "] has the wrong number of arguments", 2, method.getParameterTypes().length); + assertThat("the first parameter to submit task method [" + method + "] is the wrong type", + method.getParameterTypes()[0].getSimpleName(), endsWith("Request")); + assertThat("the second parameter to submit task method [" + method + "] is the wrong type", + method.getParameterTypes()[1], equalTo(RequestOptions.class)); + + assertThat("submit task method [" + method + "] must have wait_for_completion parameter in rest spec", + restSpec.getApi(methodName).getParams(), Matchers.hasKey("wait_for_completion")); + } + + private String extractMethodName(String apiName) { + return apiName.substring(SUBMIT_TASK_PREFIX.length(), apiName.length() - SUBMIT_TASK_SUFFIX.length()); + } + + private boolean isSubmitTaskMethod(String apiName) { + return apiName.startsWith(SUBMIT_TASK_PREFIX) && apiName.endsWith(SUBMIT_TASK_SUFFIX); + } + private static Stream<Tuple<String, Method>> getSubClientMethods(String namespace, Class<?> clientClass) { return Arrays.stream(clientClass.getMethods()).filter(method -> method.getDeclaringClass().equals(clientClass)) .map(method -> Tuple.tuple(namespace + "." + toSnakeCase(method.getName()), method)) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java index 74a4d58e2bf77..5a5091fe7586d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.CharArrays; import java.util.Arrays; @@ -34,12 +35,29 @@ import java.util.Map; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; public class SecurityIT extends ESRestHighLevelClientTestCase { + public void testPutUser() throws Exception { + final SecurityClient securityClient = highLevelClient().security(); + // create user + final PutUserRequest putUserRequest = randomPutUserRequest(randomBoolean()); + final PutUserResponse putUserResponse = execute(putUserRequest, securityClient::putUser, securityClient::putUserAsync); + // assert user created + assertThat(putUserResponse.isCreated(), is(true)); + // update user + final User updatedUser = randomUser(putUserRequest.getUser().getUsername()); + final PutUserRequest updateUserRequest = randomPutUserRequest(updatedUser, randomBoolean()); + final PutUserResponse updateUserResponse = execute(updateUserRequest, securityClient::putUser, securityClient::putUserAsync); + // assert user not created + assertThat(updateUserResponse.isCreated(), is(false)); + // delete user + final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, + "/_xpack/security/user/" + putUserRequest.getUser().getUsername()); + highLevelClient().getLowLevelClient().performRequest(deleteUserRequest); + } + public void testAuthenticate() throws Exception {
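// Flow of this test: put an enabled user, authenticate with that user's basic-auth header and // verify the returned User, delete the user, then verify that authentication now fails.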
final SecurityClient securityClient = highLevelClient().security(); // test fixture: put enabled user @@ -48,34 +66,30 @@ public void testAuthenticate() throws Exception { assertThat(putUserResponse.isCreated(), is(true)); // authenticate correctly - final String basicAuthHeader = basicAuthHeader(putUserRequest.getUsername(), putUserRequest.getPassword()); + final String basicAuthHeader = basicAuthHeader(putUserRequest.getUser().getUsername(), putUserRequest.getPassword()); final AuthenticateResponse authenticateResponse = execute(securityClient::authenticate, securityClient::authenticateAsync, authorizationRequestOptions(basicAuthHeader)); - assertThat(authenticateResponse.getUser().username(), is(putUserRequest.getUsername())); - if (putUserRequest.getRoles().isEmpty()) { - assertThat(authenticateResponse.getUser().roles(), is(empty())); - } else { - assertThat(authenticateResponse.getUser().roles(), contains(putUserRequest.getRoles().toArray())); - } - assertThat(authenticateResponse.getUser().metadata(), is(putUserRequest.getMetadata())); - assertThat(authenticateResponse.getUser().fullName(), is(putUserRequest.getFullName())); - assertThat(authenticateResponse.getUser().email(), is(putUserRequest.getEmail())); + assertThat(authenticateResponse.getUser(), is(putUserRequest.getUser())); assertThat(authenticateResponse.enabled(), is(true)); // delete user - final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, "/_xpack/security/user/" + putUserRequest.getUsername()); + final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, + "/_xpack/security/user/" + putUserRequest.getUser().getUsername()); highLevelClient().getLowLevelClient().performRequest(deleteUserRequest); // authentication no longer works ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> execute(securityClient::authenticate, securityClient::authenticateAsync, authorizationRequestOptions(basicAuthHeader))); - assertThat(e.getMessage(), containsString("unable to authenticate user [" + putUserRequest.getUsername() + "]")); + assertThat(e.getMessage(), containsString("unable to authenticate user [" + putUserRequest.getUser().getUsername() + "]")); } - private static PutUserRequest randomPutUserRequest(boolean enabled) { + private static User randomUser() { final String username = randomAlphaOfLengthBetween(1, 4); - final char[] password = randomAlphaOfLengthBetween(6, 10).toCharArray(); + return randomUser(username); + } + + private static User randomUser(String username) { final List roles = Arrays.asList(generateRandomStringArray(3, 3, false, true)); final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); @@ -91,15 +105,25 @@ private static PutUserRequest randomPutUserRequest(boolean enabled) { } else { metadata.put("string_list", Arrays.asList(generateRandomStringArray(4, 4, false, true))); } - return new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, RefreshPolicy.IMMEDIATE); + return new User(username, roles, metadata, fullName, email); + } + + private static PutUserRequest randomPutUserRequest(boolean enabled) { + final User user = randomUser(); + return randomPutUserRequest(user, enabled); } - + + private static PutUserRequest randomPutUserRequest(User user, boolean enabled) { + final char[] password = randomAlphaOfLengthBetween(6, 10).toCharArray(); + return new PutUserRequest(user, password, enabled, 
RefreshPolicy.IMMEDIATE); + } + private static String basicAuthHeader(String username, char[] password) { final String concat = new StringBuilder().append(username).append(':').append(password).toString(); final byte[] concatBytes = CharArrays.toUtf8Bytes(concat.toCharArray()); return "Basic " + Base64.getEncoder().encodeToString(concatBytes); } - + private static RequestOptions authorizationRequestOptions(String authorizationHeader) { final RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); builder.addHeader("Authorization", authorizationHeader); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index e0499c621f7ba..d2679906af207 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; +import org.elasticsearch.client.security.user.User; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; @@ -57,23 +58,21 @@ public void testPutUser() throws IOException { final String email = randomBoolean() ? null : randomAlphaOfLengthBetween(12, 24); final String fullName = randomBoolean() ? null : randomAlphaOfLengthBetween(7, 14); final boolean enabled = randomBoolean(); - final Map<String, Object> metadata; + final Map<String, Object> metadata = new HashMap<>(); if (randomBoolean()) { - metadata = new HashMap<>(); for (int i = 0; i < randomIntBetween(0, 10); i++) { metadata.put(String.valueOf(i), randomAlphaOfLengthBetween(1, 12)); } - } else { - metadata = null; } + final User user = new User(username, roles, metadata, fullName, email); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); final Map<String, String> expectedParams = getExpectedParamsFromRefreshPolicy(refreshPolicy); - PutUserRequest putUserRequest = new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, refreshPolicy); + PutUserRequest putUserRequest = new PutUserRequest(user, password, enabled, refreshPolicy); Request request = SecurityRequestConverters.putUser(putUserRequest); assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertEquals("/_xpack/security/user/" + putUserRequest.getUsername(), request.getEndpoint()); + assertEquals("/_xpack/security/user/" + putUserRequest.getUser().getUsername(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(putUserRequest, request.getEntity()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java index efd321aa7ee34..ca86a9120422b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotRequestConvertersTests.java @@ -51,7 +51,7 @@ import static org.hamcrest.Matchers.nullValue; public class SnapshotRequestConvertersTests extends ESTestCase { - + public void testGetRepositories() { Map<String, String> expectedParams
= new HashMap<>(); StringBuilder endpoint = new StringBuilder("/_snapshot"); @@ -61,14 +61,14 @@ public void testGetRepositories() { RequestConvertersTests.setRandomLocal(getRepositoriesRequest, expectedParams); if (randomBoolean()) { - String[] entries = new String[] { "a", "b", "c" }; + String[] entries = new String[]{"a", "b", "c"}; getRepositoriesRequest.repositories(entries); endpoint.append("/" + String.join(",", entries)); } Request request = SnapshotRequestConverters.getRepositories(getRepositoriesRequest); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); assertThat(expectedParams, equalTo(request.getParameters())); } @@ -88,8 +88,8 @@ public void testCreateRepository() throws IOException { .build()); Request request = SnapshotRequestConverters.createRepository(putRepositoryRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpPut.METHOD_NAME)); RequestConvertersTests.assertToXContentBody(putRepositoryRequest, request.getEntity()); } @@ -105,9 +105,9 @@ public void testDeleteRepository() { RequestConvertersTests.setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request request = SnapshotRequestConverters.deleteRepository(deleteRepositoryRequest); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint.toString())); + assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); assertNull(request.getEntity()); } @@ -121,9 +121,9 @@ public void testVerifyRepository() { RequestConvertersTests.setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); Request request = SnapshotRequestConverters.verifyRepository(verifyRepositoryRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); } public void testCreateSnapshot() throws IOException { @@ -137,14 +137,12 @@ public void testCreateSnapshot() throws IOException { Boolean waitForCompletion = randomBoolean(); createSnapshotRequest.waitForCompletion(waitForCompletion); - if (waitForCompletion) { - expectedParams.put("wait_for_completion", waitForCompletion.toString()); - } + expectedParams.put("wait_for_completion", waitForCompletion.toString()); Request request = SnapshotRequestConverters.createSnapshot(createSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpPut.METHOD_NAME)); + assertThat(request.getParameters(), 
equalTo(expectedParams)); RequestConvertersTests.assertToXContentBody(createSnapshotRequest, request.getEntity()); } @@ -178,9 +176,9 @@ public void testGetSnapshots() { } Request request = SnapshotRequestConverters.getSnapshots(getSnapshotsRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); assertNull(request.getEntity()); } @@ -202,9 +200,9 @@ public void testGetAllSnapshots() { expectedParams.put("verbose", Boolean.toString(verbose)); Request request = SnapshotRequestConverters.getSnapshots(getSnapshotsRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); assertNull(request.getEntity()); } @@ -239,10 +237,10 @@ public void testRestoreSnapshot() throws IOException { RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); RequestConvertersTests.setRandomMasterTimeout(restoreSnapshotRequest, expectedParams); - if (randomBoolean()) { - restoreSnapshotRequest.waitForCompletion(true); - expectedParams.put("wait_for_completion", "true"); - } + boolean waitForCompletion = randomBoolean(); + restoreSnapshotRequest.waitForCompletion(waitForCompletion); + expectedParams.put("wait_for_completion", Boolean.toString(waitForCompletion)); + if (randomBoolean()) { String timeout = randomTimeValue(); restoreSnapshotRequest.masterNodeTimeout(timeout); @@ -250,9 +248,9 @@ public void testRestoreSnapshot() throws IOException { } Request request = SnapshotRequestConverters.restoreSnapshot(restoreSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); RequestConvertersTests.assertToXContentBody(restoreSnapshotRequest, request.getEntity()); } @@ -269,9 +267,9 @@ public void testDeleteSnapshot() { RequestConvertersTests.setRandomMasterTimeout(deleteSnapshotRequest, expectedParams); Request request = SnapshotRequestConverters.deleteSnapshot(deleteSnapshotRequest); - assertThat(endpoint, equalTo(request.getEndpoint())); - assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod())); - assertThat(expectedParams, equalTo(request.getParameters())); + assertThat(request.getEndpoint(), equalTo(endpoint)); + assertThat(request.getMethod(), equalTo(HttpDelete.METHOD_NAME)); + assertThat(request.getParameters(), equalTo(expectedParams)); assertNull(request.getEntity()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java index ff6726faee18d..4b7889d3b7e7a 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java @@ -62,12 +62,10 @@ public void testListTasks() { expectedParams.put("detailed", "true"); } } - if (randomBoolean()) { - request.setWaitForCompletion(randomBoolean()); - if (request.getWaitForCompletion()) { - expectedParams.put("wait_for_completion", "true"); - } - } + + request.setWaitForCompletion(randomBoolean()); + expectedParams.put("wait_for_completion", Boolean.toString(request.getWaitForCompletion())); + if (randomBoolean()) { String timeout = randomTimeValue(); request.setTimeout(timeout); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java new file mode 100644 index 0000000000000..36ba953073987 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/AcknowledgedResponseTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.core; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class AcknowledgedResponseTests extends AbstractXContentTestCase { + + @Override + protected AcknowledgedResponse createTestInstance() { + return new AcknowledgedResponse(randomBoolean()); + } + + @Override + protected AcknowledgedResponse doParseInstance(XContentParser parser) throws IOException { + return AcknowledgedResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java new file mode 100644 index 0000000000000..e61123f722f40 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -0,0 +1,139 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.documentation; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.ccr.PauseFollowRequest; +import org.elasticsearch.client.core.AcknowledgedResponse; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CCRDocumentationIT extends ESRestHighLevelClientTestCase { + + public void testPauseFollow() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + // Configure local cluster as remote cluster: + + // TODO: replace with nodes info highlevel rest client code when it is available: + final Request request = new Request("GET", "/_nodes"); + Map nodesResponse = (Map) toMap(client().performRequest(request)).get("nodes"); + // Select node info of first node (we don't know the node id): + nodesResponse = (Map) nodesResponse.get(nodesResponse.keySet().iterator().next()); + String transportAddress = (String) nodesResponse.get("transport_address"); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.transientSettings(Collections.singletonMap("cluster.remote.local.seeds", transportAddress)); + ClusterUpdateSettingsResponse updateSettingsResponse = + client.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT); + assertThat(updateSettingsResponse.isAcknowledged(), is(true)); + } + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + String followIndex = "follower"; + // Follow index, so that it can be paused: + { + // TODO: Replace this with high level rest client code when put follow API is available: + final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"remote_cluster\": \"local\", \"leader_index\": \"leader\"}"); + Response response = 
client().performRequest(request); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + } + + // tag::ccr-pause-follow-request + PauseFollowRequest request = new PauseFollowRequest(followIndex); // <1> + // end::ccr-pause-follow-request + + // tag::ccr-pause-follow-execute + AcknowledgedResponse response = + client.ccr().pauseFollow(request, RequestOptions.DEFAULT); + // end::ccr-pause-follow-execute + + // tag::ccr-pause-follow-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ccr-pause-follow-response + + // tag::ccr-pause-follow-execute-listener + ActionListener<AcknowledgedResponse> listener = + new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-pause-follow-execute-listener + + // Resume follow index, so that it can be paused again: + { + // TODO: Replace this with high level rest client code when resume follow API is available: + final Request req = new Request("POST", "/" + followIndex + "/_ccr/resume_follow"); + req.setJsonEntity("{}"); + Response res = client().performRequest(req); + assertThat(res.getStatusLine().getStatusCode(), equalTo(200)); + } + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-pause-follow-execute-async + client.ccr() + .pauseFollowAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::ccr-pause-follow-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + static Map<String, Object> toMap(Response response) throws IOException { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java index 0a0ca9215c93d..bf0edcfe91c24 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -57,16 +57,11 @@ import org.elasticsearch.client.rollup.job.config.TermsGroupConfig; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.junit.After; import org.junit.Before; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -411,62 +406,6 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - @After - public void wipeRollup() throws Exception { - // TODO move this to ESRestTestCase - deleteRollupJobs(); - waitForPendingRollupTasks(); - } - - private void deleteRollupJobs() throws Exception { - Response response = adminClient().performRequest(new Request("GET", "/_xpack/rollup/job/_all")); - Map<String, Object> jobs = entityAsMap(response); - @SuppressWarnings("unchecked") - List<Map<String, Object>> jobConfigs = - (List<Map<String, Object>>)
XContentMapValues.extractValue("jobs", jobs); - - if (jobConfigs == null) { - return; - } - - for (Map jobConfig : jobConfigs) { - @SuppressWarnings("unchecked") - String jobId = (String) ((Map) jobConfig.get("config")).get("id"); - Request request = new Request("DELETE", "/_xpack/rollup/job/" + jobId); - request.addParameter("ignore", "404"); // Ignore 404s because they imply someone was racing us to delete this - adminClient().performRequest(request); - } - } - - private void waitForPendingRollupTasks() throws Exception { - assertBusy(() -> { - try { - Request request = new Request("GET", "/_cat/tasks"); - request.addParameter("detailed", "true"); - Response response = adminClient().performRequest(request); - - try (BufferedReader responseReader = new BufferedReader( - new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { - int activeTasks = 0; - String line; - StringBuilder tasksListString = new StringBuilder(); - while ((line = responseReader.readLine()) != null) { - - // We only care about Rollup jobs, otherwise this fails too easily due to unrelated tasks - if (line.startsWith("xpack/rollup/job") == true) { - activeTasks++; - tasksListString.append(line).append('\n'); - } - } - assertEquals(activeTasks + " active tasks found:\n" + tasksListString, 0, activeTasks); - } - } catch (IOException e) { - // Throw an assertion error so we retry - throw new AssertionError("Error getting active tasks list", e); - } - }); - } - @SuppressWarnings("unused") public void testDeleteRollupJob() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -512,4 +451,4 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } -} \ No newline at end of file +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 3fc787fa8585d..ffa30e16c0468 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -31,6 +31,8 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.security.AuthenticateResponse; import org.elasticsearch.client.security.ChangePasswordRequest; +import org.elasticsearch.client.security.ClearRealmCacheRequest; +import org.elasticsearch.client.security.ClearRealmCacheResponse; import org.elasticsearch.client.security.ClearRolesCacheRequest; import org.elasticsearch.client.security.ClearRolesCacheResponse; import org.elasticsearch.client.security.CreateTokenRequest; @@ -88,8 +90,8 @@ public void testPutUser() throws Exception { { //tag::put-user-execute char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; - PutUserRequest request = - new PutUserRequest("example", password, Collections.singletonList("superuser"), null, null, true, null, RefreshPolicy.NONE); + User user = new User("example", Collections.singletonList("superuser")); + PutUserRequest request = new PutUserRequest(user, password, true, RefreshPolicy.NONE); PutUserResponse response = client.security().putUser(request, RequestOptions.DEFAULT); //end::put-user-execute @@ -102,8 +104,8 @@ public void testPutUser() throws Exception { { char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; - PutUserRequest request = new PutUserRequest("example2", password, 
Collections.singletonList("superuser"), null, null, true, - null, RefreshPolicy.NONE); + User user2 = new User("example2", Collections.singletonList("superuser")); + PutUserRequest request = new PutUserRequest(user2, password, true, RefreshPolicy.NONE); // tag::put-user-execute-listener ActionListener listener = new ActionListener() { @Override @@ -298,8 +300,8 @@ public void onFailure(Exception e) { public void testEnableUser() throws Exception { RestHighLevelClient client = highLevelClient(); char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; - PutUserRequest putUserRequest = new PutUserRequest("enable_user", password, Collections.singletonList("superuser"), null, - null, true, null, RefreshPolicy.IMMEDIATE); + User enable_user = new User("enable_user", Collections.singletonList("superuser")); + PutUserRequest putUserRequest = new PutUserRequest(enable_user, password, true, RefreshPolicy.IMMEDIATE); PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); assertTrue(putUserResponse.isCreated()); @@ -343,8 +345,8 @@ public void onFailure(Exception e) { public void testDisableUser() throws Exception { RestHighLevelClient client = highLevelClient(); char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; - PutUserRequest putUserRequest = new PutUserRequest("disable_user", password, Collections.singletonList("superuser"), null, - null, true, null, RefreshPolicy.IMMEDIATE); + User disable_user = new User("disable_user", Collections.singletonList("superuser")); + PutUserRequest putUserRequest = new PutUserRequest(disable_user, password, true, RefreshPolicy.IMMEDIATE); PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); assertTrue(putUserResponse.isCreated()); { @@ -396,11 +398,11 @@ public void testAuthenticate() throws Exception { boolean enabled = response.enabled(); // <2> //end::authenticate-response - assertThat(user.username(), is("test_user")); - assertThat(user.roles(), contains(new String[] {"superuser"})); - assertThat(user.fullName(), nullValue()); - assertThat(user.email(), nullValue()); - assertThat(user.metadata().isEmpty(), is(true)); + assertThat(user.getUsername(), is("test_user")); + assertThat(user.getRoles(), contains(new String[] {"superuser"})); + assertThat(user.getFullName(), nullValue()); + assertThat(user.getEmail(), nullValue()); + assertThat(user.getMetadata().isEmpty(), is(true)); assertThat(enabled, is(true)); } @@ -422,6 +424,7 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); + // tag::authenticate-execute-async client.security().authenticateAsync(RequestOptions.DEFAULT, listener); // <1> // end::authenticate-execute-async @@ -430,6 +433,51 @@ public void onFailure(Exception e) { } } + public void testClearRealmCache() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + //tag::clear-realm-cache-request + ClearRealmCacheRequest request = new ClearRealmCacheRequest(Collections.emptyList(), Collections.emptyList()); + //end::clear-realm-cache-request + //tag::clear-realm-cache-execute + ClearRealmCacheResponse response = client.security().clearRealmCache(request, RequestOptions.DEFAULT); + //end::clear-realm-cache-execute + + assertNotNull(response); + assertThat(response.getNodes(), not(empty())); + + //tag::clear-realm-cache-response + List nodes = 
response.getNodes(); // <1> + //end::clear-realm-cache-response + } + { + //tag::clear-realm-cache-execute-listener + ClearRealmCacheRequest request = new ClearRealmCacheRequest(Collections.emptyList(), Collections.emptyList()); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClearRealmCacheResponse clearRealmCacheResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::clear-realm-cache-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::clear-realm-cache-execute-async + client.security().clearRealmCacheAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::clear-realm-cache-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testClearRolesCache() throws Exception { RestHighLevelClient client = highLevelClient(); { @@ -560,8 +608,8 @@ public void testChangePassword() throws Exception { RestHighLevelClient client = highLevelClient(); char[] password = new char[]{'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; char[] newPassword = new char[]{'n', 'e', 'w', 'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; - PutUserRequest putUserRequest = new PutUserRequest("change_password_user", password, Collections.singletonList("superuser"), - null, null, true, null, RefreshPolicy.NONE); + User user = new User("change_password_user", Collections.singletonList("superuser"), Collections.emptyMap(), null, null); + PutUserRequest putUserRequest = new PutUserRequest(user, password, true, RefreshPolicy.NONE); PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); assertTrue(putUserResponse.isCreated()); { @@ -726,8 +774,8 @@ public void testCreateToken() throws Exception { { // Setup user - PutUserRequest putUserRequest = new PutUserRequest("token_user", "password".toCharArray(), - Collections.singletonList("kibana_user"), null, null, true, null, RefreshPolicy.IMMEDIATE); + User token_user = new User("token_user", Collections.singletonList("kibana_user")); + PutUserRequest putUserRequest = new PutUserRequest(token_user, "password".toCharArray(), true, RefreshPolicy.IMMEDIATE); PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); assertTrue(putUserResponse.isCreated()); } @@ -804,8 +852,8 @@ public void testInvalidateToken() throws Exception { { // Setup user final char[] password = "password".toCharArray(); - PutUserRequest putUserRequest = new PutUserRequest("invalidate_token", password, - Collections.singletonList("kibana_user"), null, null, true, null, RefreshPolicy.IMMEDIATE); + User invalidate_token_user = new User("invalidate_token", Collections.singletonList("kibana_user")); + PutUserRequest putUserRequest = new PutUserRequest(invalidate_token_user, password, true, RefreshPolicy.IMMEDIATE); PutUserResponse putUserResponse = client.security().putUser(putUserRequest, RequestOptions.DEFAULT); assertTrue(putUserResponse.isCreated()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java index ce813f5ecf59c..1931ce3f69883 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java @@ -77,14 +77,14 @@ private void toXContent(AuthenticateResponse response, XContentBuilder builder) final User user = response.getUser(); final boolean enabled = response.enabled(); builder.startObject(); - builder.field(AuthenticateResponse.USERNAME.getPreferredName(), user.username()); - builder.field(AuthenticateResponse.ROLES.getPreferredName(), user.roles()); - builder.field(AuthenticateResponse.METADATA.getPreferredName(), user.metadata()); - if (user.fullName() != null) { - builder.field(AuthenticateResponse.FULL_NAME.getPreferredName(), user.fullName()); + builder.field(AuthenticateResponse.USERNAME.getPreferredName(), user.getUsername()); + builder.field(AuthenticateResponse.ROLES.getPreferredName(), user.getRoles()); + builder.field(AuthenticateResponse.METADATA.getPreferredName(), user.getMetadata()); + if (user.getFullName() != null) { + builder.field(AuthenticateResponse.FULL_NAME.getPreferredName(), user.getFullName()); } - if (user.email() != null) { - builder.field(AuthenticateResponse.EMAIL.getPreferredName(), user.email()); + if (user.getEmail() != null) { + builder.field(AuthenticateResponse.EMAIL.getPreferredName(), user.getEmail()); } builder.field(AuthenticateResponse.ENABLED.getPreferredName(), enabled); builder.endObject(); @@ -92,8 +92,8 @@ private void toXContent(AuthenticateResponse response, XContentBuilder builder) private AuthenticateResponse copy(AuthenticateResponse response) { final User originalUser = response.getUser(); - final User copyUser = new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), originalUser.fullName(), - originalUser.email()); + final User copyUser = new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(), + originalUser.getFullName(), originalUser.getEmail()); return new AuthenticateResponse(copyUser, response.enabled()); } @@ -101,27 +101,27 @@ private AuthenticateResponse mutate(AuthenticateResponse response) { final User originalUser = response.getUser(); switch (randomIntBetween(1, 6)) { case 1: - return new AuthenticateResponse(new User(originalUser.username() + "wrong", originalUser.roles(), originalUser.metadata(), - originalUser.fullName(), originalUser.email()), response.enabled()); + return new AuthenticateResponse(new User(originalUser.getUsername() + "wrong", originalUser.getRoles(), + originalUser.getMetadata(), originalUser.getFullName(), originalUser.getEmail()), response.enabled()); case 2: - final Collection<String> wrongRoles = new ArrayList<>(originalUser.roles()); + final Collection<String> wrongRoles = new ArrayList<>(originalUser.getRoles()); wrongRoles.add(randomAlphaOfLengthBetween(1, 4)); - return new AuthenticateResponse(new User(originalUser.username(), wrongRoles, originalUser.metadata(), - originalUser.fullName(), originalUser.email()), response.enabled()); + return new AuthenticateResponse(new User(originalUser.getUsername(), wrongRoles, originalUser.getMetadata(), + originalUser.getFullName(), originalUser.getEmail()), response.enabled()); case 3: - final Map<String, Object> wrongMetadata = new HashMap<>(originalUser.metadata()); + final Map<String, Object> wrongMetadata = new HashMap<>(originalUser.getMetadata()); wrongMetadata.put("wrong_string", randomAlphaOfLengthBetween(0, 4)); - return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), wrongMetadata, - originalUser.fullName(), originalUser.email()), response.enabled()); + return new
AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), wrongMetadata, + originalUser.getFullName(), originalUser.getEmail()), response.enabled()); case 4: - return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), - originalUser.fullName() + "wrong", originalUser.email()), response.enabled()); + return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(), + originalUser.getFullName() + "wrong", originalUser.getEmail()), response.enabled()); case 5: - return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), - originalUser.fullName(), originalUser.email() + "wrong"), response.enabled()); + return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(), + originalUser.getFullName(), originalUser.getEmail() + "wrong"), response.enabled()); case 6: - return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), - originalUser.fullName(), originalUser.email()), !response.enabled()); + return new AuthenticateResponse(new User(originalUser.getUsername(), originalUser.getRoles(), originalUser.getMetadata(), + originalUser.getFullName(), originalUser.getEmail()), !response.enabled()); } throw new IllegalStateException("Bad random number"); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java new file mode 100644 index 0000000000000..d21ed1a71a0c4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ClearRealmCacheResponseTests extends ESTestCase { + + public void testParseFromXContent() throws IOException { + final ElasticsearchException exception = new ElasticsearchException("test"); + final String nodesHeader = "\"_nodes\": { \"total\": 2, \"successful\": 1, \"failed\": 1, \"failures\": [ " + + Strings.toString(exception) + "] },"; + final String clusterName = "\"cluster_name\": \"cn\","; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{" + nodesHeader + clusterName + "\"nodes\" : {} }")) { + + ClearRealmCacheResponse response = ClearRealmCacheResponse.fromXContent(parser); + assertNotNull(response); + assertThat(response.getNodes(), empty()); + assertThat(response.getClusterName(), equalTo("cn")); + assertThat(response.getHeader().getSuccessful(), equalTo(1)); + assertThat(response.getHeader().getFailed(), equalTo(1)); + assertThat(response.getHeader().getTotal(), equalTo(2)); + assertThat(response.getHeader().getFailures(), hasSize(1)); + assertThat(response.getHeader().getFailures().get(0).getMessage(), containsString("reason=test")); + } + + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{" + nodesHeader + clusterName + "\"nodes\" : { " + + "\"id1\": { \"name\": \"a\"}, " + + "\"id2\": { \"name\": \"b\"}" + + "}}")) { + + ClearRealmCacheResponse response = ClearRealmCacheResponse.fromXContent(parser); + assertNotNull(response); + assertThat(response.getNodes(), hasSize(2)); + assertThat(response.getNodes().get(0).getId(), equalTo("id1")); + assertThat(response.getNodes().get(0).getName(), equalTo("a")); + assertThat(response.getNodes().get(1).getId(), equalTo("id2")); + assertThat(response.getNodes().get(1).getName(), equalTo("b")); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivilegesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivilegesTests.java new file mode 100644 index 0000000000000..9575363a40963 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/ApplicationResourcePrivilegesTests.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security.user.privileges; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.is; + +public class ApplicationResourcePrivilegesTests extends AbstractXContentTestCase { + + @Override + protected ApplicationResourcePrivileges createTestInstance() { + return new ApplicationResourcePrivileges(randomAlphaOfLengthBetween(1, 8), + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8))), + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8)))); + } + + @Override + protected ApplicationResourcePrivileges doParseInstance(XContentParser parser) throws IOException { + return ApplicationResourcePrivileges.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testEmptyApplicationName() { + final String emptyApplicationName = randomBoolean() ? "" : null; + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new ApplicationResourcePrivileges(emptyApplicationName, + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8))), + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8))))); + assertThat(e.getMessage(), is("application privileges must have an application name")); + } + + public void testEmptyPrivileges() { + final Collection emptyPrivileges = randomBoolean() ? Collections.emptyList() : null; + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new ApplicationResourcePrivileges(randomAlphaOfLengthBetween(1, 8), + emptyPrivileges, + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8))))); + assertThat(e.getMessage(), is("application privileges must define at least one privilege")); + } + + public void testEmptyResources() { + final Collection emptyResources = randomBoolean() ? 
Collections.emptyList() : null; + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new ApplicationResourcePrivileges(randomAlphaOfLengthBetween(1, 8), + Arrays.asList(randomArray(1, 8, size -> new String[size], () -> randomAlphaOfLengthBetween(1, 8))), + emptyResources)); + assertThat(e.getMessage(), is("application privileges must refer to at least one resource")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/GlobalPrivilegesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/GlobalPrivilegesTests.java new file mode 100644 index 0000000000000..bb1e933089189 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/user/privileges/GlobalPrivilegesTests.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security.user.privileges; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; + +public class GlobalPrivilegesTests extends AbstractXContentTestCase { + + private static long idCounter = 0; + + @Override + protected GlobalPrivileges createTestInstance() { + final List privilegeList = Arrays + .asList(randomArray(1, 4, size -> new GlobalOperationPrivilege[size], () -> buildRandomGlobalScopedPrivilege())); + return new GlobalPrivileges(privilegeList); + } + + @Override + protected GlobalPrivileges doParseInstance(XContentParser parser) throws IOException { + return GlobalPrivileges.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; // true really means inserting bogus privileges + } + + public void testEmptyOrNullGlobalOperationPrivilege() { + final Map privilege = randomBoolean() ? null : Collections.emptyMap(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GlobalOperationPrivilege(randomAlphaOfLength(2), randomAlphaOfLength(2), privilege)); + assertThat(e.getMessage(), is("Privileges cannot be empty or null")); + } + + public void testEmptyOrNullGlobalPrivileges() { + final List privileges = randomBoolean() ? 
null : Collections.emptyList(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GlobalPrivileges(privileges)); + assertThat(e.getMessage(), is("Privileges cannot be empty or null")); + } + + public void testDuplicateGlobalOperationPrivilege() { + final GlobalOperationPrivilege privilege = buildRandomGlobalScopedPrivilege(); + // duplicate + final GlobalOperationPrivilege privilege2 = new GlobalOperationPrivilege(privilege.getCategory(), privilege.getOperation(), + new HashMap<>(privilege.getRaw())); + final GlobalPrivileges globalPrivilege = new GlobalPrivileges(Arrays.asList(privilege, privilege2)); + assertThat(globalPrivilege.getPrivileges().size(), is(1)); + assertThat(globalPrivilege.getPrivileges().iterator().next(), is(privilege)); + } + + public void testSameScopeGlobalOperationPrivilege() { + final GlobalOperationPrivilege privilege = buildRandomGlobalScopedPrivilege(); + final GlobalOperationPrivilege sameOperationPrivilege = new GlobalOperationPrivilege(privilege.getCategory(), + privilege.getOperation(), buildRandomGlobalScopedPrivilege().getRaw()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new GlobalPrivileges(Arrays.asList(privilege, sameOperationPrivilege))); + assertThat(e.getMessage(), is("Different privileges for the same category and operation are not permitted")); + } + + private static GlobalOperationPrivilege buildRandomGlobalScopedPrivilege() { + final Map privilege = new HashMap<>(); + for (int i = 0; i < randomIntBetween(1, 4); i++) { + privilege.put(randomAlphaOfLength(2) + idCounter++, randomAlphaOfLengthBetween(1, 4)); + } + return new GlobalOperationPrivilege("application", randomAlphaOfLength(2) + idCounter++, privilege); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/TaskSubmissionResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/TaskSubmissionResponseTests.java new file mode 100644 index 0000000000000..4e21b28dd8181 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/tasks/TaskSubmissionResponseTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.tasks; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class TaskSubmissionResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + TaskSubmissionResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + private void toXContent(TaskSubmissionResponse response, XContentBuilder xContentBuilder) throws IOException { + xContentBuilder.startObject(); + xContentBuilder.field("task", response.getTask()); + xContentBuilder.endObject(); + } + + private TaskSubmissionResponse createTestInstance() { + String taskId = randomAlphaOfLength(5) + ":" + randomLong(); + return new TaskSubmissionResponse(taskId); + } +} diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index dd19594d29b87..70e496ff2f274 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -310,11 +310,11 @@ private String getElasticUrl( baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId); } final String platformUrl = - String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, Version.displayVersion(version, isSnapshot)); + String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, Build.CURRENT.getQualifiedVersion()); if (urlExists(terminal, platformUrl)) { return platformUrl; } - return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Version.displayVersion(version, isSnapshot)); + return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Build.CURRENT.getQualifiedVersion()); } private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index ceff354f15fe8..c93f39902e533 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -936,7 +936,8 @@ public void assertInstallPluginFromUrl( } public void testOfficialPlugin() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false); } @@ -945,7 +946,7 @@ public void testOfficialPluginSnapshot() throws Exception { Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", Version.CURRENT, - Version.displayVersion(Version.CURRENT, true)); + Build.CURRENT.getQualifiedVersion()); 
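+        // getQualifiedVersion() includes any alpha/beta/rc qualifier and the -SNAPSHOT suffix, unlike Version.CURRENT, so the URL matches the published snapshot artifact name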
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true); } @@ -954,7 +955,7 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { Locale.ROOT, "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip", Version.CURRENT, - Version.displayVersion(Version.CURRENT, true)); + Build.CURRENT.getQualifiedVersion()); // attemping to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception final UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, true)); @@ -965,13 +966,13 @@ public void testInstallReleaseBuildOfPluginOnSnapshotBuild() { public void testOfficialPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Version.CURRENT + ".zip"; + + Build.CURRENT.getQualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); } public void testOfficialPlatformPlugin() throws Exception { String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME + - "-" + Version.CURRENT + ".zip"; + "-" + Build.CURRENT.getQualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false); } @@ -981,13 +982,13 @@ public void testOfficialPlatformPluginSnapshot() throws Exception { "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip", Version.CURRENT, Platforms.PLATFORM_NAME, - Version.displayVersion(Version.CURRENT, true)); + Build.CURRENT.getQualifiedVersion()); assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true); } public void testOfficialPlatformPluginStaging() throws Exception { String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" - + Platforms.PLATFORM_NAME + "-"+ Version.CURRENT + ".zip"; + + Platforms.PLATFORM_NAME + "-"+ Build.CURRENT.getQualifiedVersion() + ".zip"; assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false); } @@ -1009,10 +1010,13 @@ public void testMavenSha1Backcompat() throws Exception { } public void testOfficialShaMissing() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-1"); UserException e = expectThrows(UserException.class, () -> - assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest), null, (b, p) -> null)); + assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, + ".sha1", checksum(digest), null, (b, p) -> null) + ); assertEquals(ExitCodes.IO_ERROR, e.exitCode); assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage()); } @@ -1027,7 +1031,8 @@ public void testMavenShaMissing() throws Exception { } public void testInvalidShaFileMissingFilename() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = 
"https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl( @@ -1037,7 +1042,8 @@ public void testInvalidShaFileMissingFilename() throws Exception { } public void testInvalidShaFileMismatchFilename() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion()+ ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl( @@ -1047,7 +1053,7 @@ public void testInvalidShaFileMismatchFilename() throws Exception { null, false, ".sha512", - checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip"), + checksumAndString(digest, " repository-s3-" + Build.CURRENT.getQualifiedVersion() + ".zip"), null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -1055,7 +1061,8 @@ public void testInvalidShaFileMismatchFilename() throws Exception { } public void testInvalidShaFileContainingExtraLine() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; MessageDigest digest = MessageDigest.getInstance("SHA-512"); UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl( @@ -1065,7 +1072,7 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { null, false, ".sha512", - checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar"), + checksumAndString(digest, " analysis-icu-" + Build.CURRENT.getQualifiedVersion() + ".zip\nfoobar"), null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -1073,7 +1080,8 @@ public void testInvalidShaFileContainingExtraLine() throws Exception { } public void testSha512Mismatch() throws Exception { - String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip"; + String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; UserException e = expectThrows(UserException.class, () -> assertInstallPluginFromUrl( "analysis-icu", @@ -1082,7 +1090,7 @@ public void testSha512Mismatch() throws Exception { null, false, ".sha512", - bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip", + bytes -> "foobar analysis-icu-" + Build.CURRENT.getQualifiedVersion() + ".zip", null, (b, p) -> null)); assertEquals(ExitCodes.IO_ERROR, e.exitCode); @@ -1101,7 +1109,8 @@ public void testSha1Mismatch() throws Exception { public void testPublicKeyIdMismatchToExpectedPublicKeyId() throws Exception { final String icu = "analysis-icu"; final String url = - "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + Version.CURRENT + ".zip"; + "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; final MessageDigest digest = 
MessageDigest.getInstance("SHA-512"); /* * To setup a situation where the expected public key ID does not match the public key ID used for signing, we generate a new public @@ -1124,7 +1133,8 @@ public void testPublicKeyIdMismatchToExpectedPublicKeyId() throws Exception { public void testFailedSignatureVerification() throws Exception { final String icu = "analysis-icu"; final String url = - "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + Version.CURRENT + ".zip"; + "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/" + icu + "-" + + Build.CURRENT.getQualifiedVersion() + ".zip"; final MessageDigest digest = MessageDigest.getInstance("SHA-512"); /* * To setup a situation where signature verification fails, we will mutate the input byte array by modifying a single byte to some diff --git a/docs/build.gradle b/docs/build.gradle index e247542ee0817..90848e3cdfb8d 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1128,3 +1128,33 @@ buildRestTests.setups['remote_cluster_and_leader_index'] = buildRestTests.setups index.number_of_shards: 1 index.soft_deletes.enabled: true ''' + +buildRestTests.setups['seats'] = ''' + - do: + indices.create: + index: seats + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + theatre: + type: keyword + cost: + type: long + - do: + bulk: + index: seats + type: _doc + refresh: true + body: | + {"index":{}} + {"theatre": "Skyline", "cost": 1} + {"index":{}} + {"theatre": "Graye", "cost": 5} + {"index":{}} + {"theatre": "Graye", "cost": 8} + {"index":{}} + {"theatre": "Skyline", "cost": 10}''' diff --git a/docs/java-rest/high-level/ccr/pause_follow.asciidoc b/docs/java-rest/high-level/ccr/pause_follow.asciidoc new file mode 100644 index 0000000000000..08acf7cadce8a --- /dev/null +++ b/docs/java-rest/high-level/ccr/pause_follow.asciidoc @@ -0,0 +1,35 @@ +-- +:api: ccr-pause-follow +:request: PauseFollowRequest +:response: PauseFollowResponse +-- + +[id="{upid}-{api}"] +=== Pause Follow API + + +[id="{upid}-{api}-request"] +==== Request + +The Pause Follow API allows you to pause following by follow index name. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The name of follow index. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the pause follow request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the pause follow was acknowledge. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc new file mode 100644 index 0000000000000..5427db148d65e --- /dev/null +++ b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc @@ -0,0 +1,33 @@ + +-- +:api: clear-realm-cache +:request: ClearRealmCacheRequest +:response: ClearRealmCacheResponse +-- + +[id="{upid}-{api}"] +=== Clear Realm Cache API + +[id="{upid}-{api}-request"] +==== Clear Realm Cache Request + +A +{request}+ supports defining the name of realms and usernames that the cache should be cleared +for. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Clear Roles Cache Response + +The returned +{response}+ allows to retrieve information about where the cache was cleared. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> the list of nodes that the cache was cleared on diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 7a2a86a8390ea..dd867d4691a81 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -331,6 +331,7 @@ The Java High Level REST Client supports the following Security APIs: * <> * <> * <<{upid}-clear-roles-cache>> +* <<{upid}-clear-realm-cache>> * <<{upid}-authenticate>> * <> * <> @@ -345,6 +346,7 @@ include::security/disable-user.asciidoc[] include::security/change-password.asciidoc[] include::security/delete-role.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/clear-realm-cache.asciidoc[] include::security/authenticate.asciidoc[] include::security/get-certificates.asciidoc[] include::security/put-role-mapping.asciidoc[] @@ -395,3 +397,14 @@ don't leak into the rest of the documentation. :doc-tests-file!: :upid!: -- + +== CCR APIs + +:upid: {mainid}-ccr +:doc-tests-file: {doc-tests}/CCRDocumentationIT.java + +The Java High Level REST Client supports the following CCR APIs: + +* <<{upid}-ccr-pause-follow>> + +include::ccr/pause_follow.asciidoc[] diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index cc7bc752ec6d9..7c342a3da7a5a 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -44,8 +44,10 @@ specialized code may define new ways to use a Painless script. 
| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
| Metric aggregation reduce | <>
  | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation]
-| Bucket aggregation | <>
+| Bucket script aggregation | <>
  | {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation]
+| Bucket selector aggregation | <>
+  | {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Elasticsearch Documentation]
| Watcher condition | <>
  | {xpack-ref}/condition-script.html[Elasticsearch Documentation]
| Watcher transform | <>
diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc
index a71fde0be32a0..0c8c21c06a9be 100644
--- a/docs/painless/painless-contexts/index.asciidoc
+++ b/docs/painless/painless-contexts/index.asciidoc
@@ -28,7 +28,9 @@ include::painless-metric-agg-combine-context.asciidoc[]

 include::painless-metric-agg-reduce-context.asciidoc[]

-include::painless-bucket-agg-context.asciidoc[]
+include::painless-bucket-script-agg-context.asciidoc[]
+
+include::painless-bucket-selector-agg-context.asciidoc[]

 include::painless-analysis-predicate-context.asciidoc[]
diff --git a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc
deleted file mode 100644
index 3bb4cae3d3bab..0000000000000
--- a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc
+++ /dev/null
@@ -1,21 +0,0 @@
-[[painless-bucket-agg-context]]
-=== Bucket aggregation context
-
-Use a Painless script in an
-{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation]
-to calculate a value as a result in a bucket.
-
-*Variables*
-
-`params` (`Map`, read-only)::
-  User-defined parameters passed in as part of the query. The parameters
-  include values defined as part of the `buckets_path`.
-
-*Return*
-
-numeric::
-  The calculated value as the result.
-
-*API*
-
-The standard <> is available.
\ No newline at end of file
diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc
new file mode 100644
index 0000000000000..5a5306016945d
--- /dev/null
+++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc
@@ -0,0 +1,86 @@
+[[painless-bucket-script-agg-context]]
+=== Bucket script aggregation context
+
+Use a Painless script in a
+{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[`bucket_script` pipeline aggregation]
+to calculate a value as a result in a bucket.
+
+==== Variables
+
+`params` (`Map`, read-only)::
+  User-defined parameters passed in as part of the query. The parameters
+  include values defined as part of the `buckets_path`.
+
+==== Return
+
+numeric::
+  The calculated value as the result.
+
+==== API
+
+The standard <> is available.
+
+==== Example
+
+To run this example, first follow the steps in <>.
+
+The Painless context in a `bucket_script` aggregation provides a `params` map. This map contains both
+user-specified custom values and the values from other aggregations specified in the `buckets_path`
+property.
+
+This example takes the values from a min and max aggregation, calculates the difference,
+and adds the user-specified `base_cost` to the result:
+
+[source,Painless]
+--------------------------------------------------
+(params.max - params.min) + params.base_cost
+--------------------------------------------------
+
+Note that the values are extracted from the `params` map. In context, the aggregation looks like this:
+
+[source,js]
+--------------------------------------------------
+GET /seats/_search
+{
+  "size": 0,
+  "aggs": {
+    "theatres": {
+      "terms": {
+        "field": "theatre",
+        "size": 10
+      },
+      "aggs": {
+        "min_cost": {
+          "min": {
+            "field": "cost"
+          }
+        },
+        "max_cost": {
+          "max": {
+            "field": "cost"
+          }
+        },
+        "spread_plus_base": {
+          "bucket_script": {
+            "buckets_path": { <1>
+              "min": "min_cost",
+              "max": "max_cost"
+            },
+            "script": {
+              "params": {
+                "base_cost": 5 <2>
+              },
+              "source": "(params.max - params.min) + params.base_cost"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:seats]
+<1> The `buckets_path` points to two aggregations (`min_cost`, `max_cost`) and adds `min`/`max` variables
+to the `params` map
+<2> The user-specified `base_cost` is also added to the script's `params` map
\ No newline at end of file
diff --git a/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc
new file mode 100644
index 0000000000000..8e20cf77c353d
--- /dev/null
+++ b/docs/painless/painless-contexts/painless-bucket-selector-agg-context.asciidoc
@@ -0,0 +1,81 @@
+
+[[painless-bucket-selector-agg-context]]
+=== Bucket selector aggregation context
+
+Use a Painless script in a
+{ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[`bucket_selector` aggregation]
+to determine if a bucket should be retained or filtered out.
+
+==== Variables
+
+`params` (`Map`, read-only)::
+  User-defined parameters passed in as part of the query. The parameters
+  include values defined as part of the `buckets_path`.
+
+==== Return
+
+boolean::
+  True if the bucket should be retained, false if the bucket should be filtered out.
+
+==== API
+
+The standard <> is available.
+
+==== Example
+
+To run this example, first follow the steps in <>.
+
+The Painless context in a `bucket_selector` aggregation provides a `params` map. This map contains both
+user-specified custom values and the values from other aggregations specified in the `buckets_path`
+property.
+
+Unlike some other aggregation contexts, the `bucket_selector` context must return a boolean `true` or `false`.
+
+This example finds the max of each bucket, adds a user-specified `base_cost`, and retains all of the
+buckets with a result greater than `10`.
+
+[source,Painless]
+--------------------------------------------------
+params.max + params.base_cost > 10
+--------------------------------------------------
+
+Note that the values are extracted from the `params` map. The script is in the form of an expression
+that returns `true` or `false`.
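+
+As a hedged variant (using the same hypothetical `params` keys), the expression
+can also guard against buckets where the metric value is missing before comparing:
+
+[source,Painless]
+--------------------------------------------------
+params.max != null && params.max + params.base_cost > 10
+--------------------------------------------------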
+In context, the aggregation looks like this:
+
+[source,js]
+--------------------------------------------------
+GET /seats/_search
+{
+  "size": 0,
+  "aggs": {
+    "theatres": {
+      "terms": {
+        "field": "theatre",
+        "size": 10
+      },
+      "aggs": {
+        "max_cost": {
+          "max": {
+            "field": "cost"
+          }
+        },
+        "filtering_agg": {
+          "bucket_selector": {
+            "buckets_path": { <1>
+              "max": "max_cost"
+            },
+            "script": {
+              "params": {
+                "base_cost": 5 <2>
+              },
+              "source": "params.max + params.base_cost > 10"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:seats]
+<1> The `buckets_path` points to the max aggregation (`max_cost`) and adds the `max` variable
+to the `params` map
+<2> The user-specified `base_cost` is also added to the `params` map
\ No newline at end of file
diff --git a/docs/painless/painless-contexts/painless-field-context.asciidoc b/docs/painless/painless-contexts/painless-field-context.asciidoc
index 80307b25ea545..15a9f4255232c 100644
--- a/docs/painless/painless-contexts/painless-field-context.asciidoc
+++ b/docs/painless/painless-contexts/painless-field-context.asciidoc
@@ -26,3 +26,59 @@ a customized value for each document in the results of a query.
 *API*

 The standard <> is available.
+
+
+*Example*
+
+To run this example, first follow the steps in
+<>.
+
+You can then use these two example scripts to compute custom information
+for each search hit and output it to two new fields.
+
+The first script gets the doc value for the `datetime` field and calls
+the `getDayOfWeek` function to determine the corresponding day of the week.
+
+[source,Painless]
+----
+doc['datetime'].value.getDayOfWeek();
+----
+
+The second script calculates the number of actors. Actors' names are stored
+as a text array in the `actors` field.
+
+[source,Painless]
+----
+params['_source']['actors'].length; <1>
+----
+
+<1> By default, doc values are not available for text fields. However,
+    you can still calculate the number of actors by extracting actors
+    from `_source`. Note that `params['_source']['actors']` is a list.
+
+
+Submit the following request:
+
+[source,js]
+----
+GET seats/_search
+{
+    "query" : {
+        "match_all": {}
+    },
+    "script_fields" : {
+        "day-of-week" : {
+            "script" : {
+                "source": "doc['datetime'].value.getDayOfWeek()"
+            }
+        },
+        "number-of-actors" : {
+            "script" : {
+                "source": "params['_source']['actors'].length"
+            }
+        }
+    }
+}
+----
+// CONSOLE
+// TEST[skip: requires setup from other pages]
\ No newline at end of file
diff --git a/docs/plugins/discovery-azure-classic.asciidoc b/docs/plugins/discovery-azure-classic.asciidoc
index 1c1925de878aa..1f973567284e9 100644
--- a/docs/plugins/discovery-azure-classic.asciidoc
+++ b/docs/plugins/discovery-azure-classic.asciidoc
@@ -377,6 +377,7 @@ This command should give you a JSON result:
  "build_hash" : "f27399d",
  "build_date" : "2016-03-30T09:51:41.449Z",
  "build_snapshot" : false,
+  "qualified" : "{version_qualified}",
  "lucene_version" : "{lucene_version}",
  "minimum_wire_compatibility_version" : "1.2.3",
  "minimum_index_compatibility_version" : "1.2.3"
diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index b065951856a45..ddbe0b16cc6ad 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -208,8 +208,12 @@ The following settings are supported:
 `storage_class`::

-    Sets the S3 storage class type for the backup files. Values may be
-    `standard`, `reduced_redundancy`, `standard_ia`. Defaults to `standard`.
+    Sets the S3 storage class for objects stored in the snapshot repository.
+    Values may be `standard`, `reduced_redundancy`, `standard_ia`.
+    Defaults to `standard`. Changing this setting on an existing repository
+    only affects the storage class for newly created objects, resulting in a
+    mixed usage of storage classes. Additionally, S3 Lifecycle Policies can
+    be used to manage the storage class of existing objects.
 Due to the extra complexity with the Glacier class lifecycle, it is not
 currently supported by the plugin. For more information about the different classes, see
 http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide]
@@ -304,6 +308,9 @@ You may further restrict the permissions by specifying a prefix within the bucke
 The bucket needs to exist to register a repository for snapshots. If you did not create the bucket then the repository
 registration will fail.

+Note: Starting in version 7.0, all bucket operations use the path style access pattern. In previous versions, the
+decision to use virtual hosted style or path style access was made by the AWS Java SDK.
+
 [[repository-s3-aws-vpc]]
 [float]
 ==== AWS VPC Bandwidth Settings
diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc
index ddb55e8d34c8e..52b27c578929f 100644
--- a/docs/reference/aggregations/bucket.asciidoc
+++ b/docs/reference/aggregations/bucket.asciidoc
@@ -49,6 +49,8 @@ include::bucket/missing-aggregation.asciidoc[]

 include::bucket/nested-aggregation.asciidoc[]

+include::bucket/parent-aggregation.asciidoc[]
+
 include::bucket/range-aggregation.asciidoc[]

 include::bucket/reverse-nested-aggregation.asciidoc[]
diff --git a/docs/reference/aggregations/bucket/parent-aggregation.asciidoc b/docs/reference/aggregations/bucket/parent-aggregation.asciidoc
new file mode 100644
index 0000000000000..056437df5c818
--- /dev/null
+++ b/docs/reference/aggregations/bucket/parent-aggregation.asciidoc
@@ -0,0 +1,213 @@
+[[search-aggregations-bucket-parent-aggregation]]
+=== Parent Aggregation
+
+A special single bucket aggregation that selects the parent documents of children with the specified type, as defined in a <>.
+
+This aggregation has a single option:
+
+* `type` - The child type that should be selected.
+
+For example, let's say we have an index of questions and answers. The answer type has the following `join` field in the mapping:
+
+[source,js]
+--------------------------------------------------
+PUT parent_example
+{
+  "mappings": {
+    "_doc": {
+      "properties": {
+        "join": {
+          "type": "join",
+          "relations": {
+            "question": "answer"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+
+The `question` documents contain a `tags` field and the `answer` documents contain an `owner` field. With the `parent`
+aggregation, the owner buckets can be mapped to the tag buckets in a single request, even though the two fields exist in
+two different kinds of documents.
+
+An example of a question document:
+
+[source,js]
+--------------------------------------------------
+PUT parent_example/_doc/1
+{
+  "join": {
+    "name": "question"
+  },
+  "body": "I have Windows 2003 server and i bought a new Windows 2008 server...",
+  "title": "Whats the best way to file transfer my site from server to a newer one?",
+  "tags": [
+    "windows-server-2003",
+    "windows-server-2008",
+    "file-transfer"
+  ]
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+Examples of `answer` documents:
+
+[source,js]
+--------------------------------------------------
+PUT parent_example/_doc/2?routing=1
+{
+  "join": {
+    "name": "answer",
+    "parent": "1"
+  },
+  "owner": {
+    "location": "Norfolk, United Kingdom",
+    "display_name": "Sam",
+    "id": 48
+  },
+  "body": "Unfortunately you're pretty much limited to FTP...",
+  "creation_date": "2009-05-04T13:45:37.030"
+}
+
+PUT parent_example/_doc/3?routing=1&refresh
+{
+  "join": {
+    "name": "answer",
+    "parent": "1"
+  },
+  "owner": {
+    "location": "Norfolk, United Kingdom",
+    "display_name": "Troll",
+    "id": 49
+  },
+  "body": "Use Linux...",
+  "creation_date": "2009-05-05T13:45:37.030"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The following request connects the two together:
+
+[source,js]
+--------------------------------------------------
+POST parent_example/_search?size=0
+{
+  "aggs": {
+    "top-names": {
+      "terms": {
+        "field": "owner.display_name.keyword",
+        "size": 10
+      },
+      "aggs": {
+        "to-questions": {
+          "parent": {
+            "type" : "answer" <1>
+          },
+          "aggs": {
+            "top-tags": {
+              "terms": {
+                "field": "tags.keyword",
+                "size": 10
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+<1> The `type` points to the type/mapping named `answer`.
+
+The above example returns the top answer owners and, per owner, the top question tags.
+
+Possible response:
+
+[source,js]
+--------------------------------------------------
+{
+  "took": 9,
+  "timed_out": false,
+  "_shards": {
+    "total": 1,
+    "successful": 1,
+    "skipped": 0,
+    "failed": 0
+  },
+  "hits": {
+    "total": 3,
+    "max_score": null,
+    "hits": []
+  },
+  "aggregations": {
+    "top-names": {
+      "doc_count_error_upper_bound": 0,
+      "sum_other_doc_count": 0,
+      "buckets": [
+        {
+          "key": "Sam",
+          "doc_count": 1, <1>
+          "to-questions": {
+            "doc_count": 1, <2>
+            "top-tags": {
+              "doc_count_error_upper_bound": 0,
+              "sum_other_doc_count": 0,
+              "buckets": [
+                {
+                  "key": "file-transfer",
+                  "doc_count": 1
+                },
+                {
+                  "key": "windows-server-2003",
+                  "doc_count": 1
+                },
+                {
+                  "key": "windows-server-2008",
+                  "doc_count": 1
+                }
+              ]
+            }
+          }
+        },
+        {
+          "key": "Troll",
+          "doc_count": 1,
+          "to-questions": {
+            "doc_count": 1,
+            "top-tags": {
+              "doc_count_error_upper_bound": 0,
+              "sum_other_doc_count": 0,
+              "buckets": [
+                {
+                  "key": "file-transfer",
+                  "doc_count": 1
+                },
+                {
+                  "key": "windows-server-2003",
+                  "doc_count": 1
+                },
+                {
+                  "key": "windows-server-2008",
+                  "doc_count": 1
+                }
+              ]
+            }
+          }
+        }
+      ]
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"took": 9/"took": $body.took/]
+
+<1> The number of answer documents owned by `Sam`, `Troll`, etc.
+<2> The number of question documents that are related to answer documents owned by `Sam`, `Troll`, etc.
diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
index 09fa707c5db3b..febd9bc8a55d2 100644
--- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
+++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc
@@ -7,6 +7,7 @@ etc.

 This is conceptually very similar to the <> pipeline aggregation, except it provides more functionality.
+
 ==== Syntax

 A `moving_fn` aggregation looks like this in isolation:
diff --git a/docs/reference/cat/plugins.asciidoc b/docs/reference/cat/plugins.asciidoc
index 265a9e270f581..4f3bd8f3b5457 100644
--- a/docs/reference/cat/plugins.asciidoc
+++ b/docs/reference/cat/plugins.asciidoc
@@ -14,24 +14,24 @@ Might look like:
 ["source","txt",subs="attributes,callouts"]
 ------------------------------------------------------------------------------
 name component version description
-U7321H6 analysis-icu {version} The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.
-U7321H6 analysis-kuromoji {version} The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.
-U7321H6 analysis-nori {version} The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch. -U7321H6 analysis-phonetic {version} The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch. -U7321H6 analysis-smartcn {version} Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch. -U7321H6 analysis-stempel {version} The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch. -U7321H6 analysis-ukrainian {version} The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch. -U7321H6 discovery-azure-classic {version} The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism -U7321H6 discovery-ec2 {version} The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism. -U7321H6 discovery-gce {version} The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. -U7321H6 ingest-attachment {version} Ingest processor that uses Apache Tika to extract contents -U7321H6 ingest-geoip {version} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database -U7321H6 ingest-user-agent {version} Ingest processor that extracts information from a user agent -U7321H6 mapper-annotated-text {version} The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index. -U7321H6 mapper-murmur3 {version} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index. -U7321H6 mapper-size {version} The Mapper Size plugin allows document to record their uncompressed size at index time. -U7321H6 store-smb {version} The Store SMB plugin adds support for SMB stores. -U7321H6 transport-nio {version} The nio transport. +U7321H6 analysis-icu {version_qualified} The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components. +U7321H6 analysis-kuromoji {version_qualified} The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch. +U7321H6 analysis-nori {version_qualified} The Korean (nori) Analysis plugin integrates Lucene nori analysis module into elasticsearch. +U7321H6 analysis-phonetic {version_qualified} The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch. +U7321H6 analysis-smartcn {version_qualified} Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch. +U7321H6 analysis-stempel {version_qualified} The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch. +U7321H6 analysis-ukrainian {version_qualified} The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into elasticsearch. +U7321H6 discovery-azure-classic {version_qualified} The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism +U7321H6 discovery-ec2 {version_qualified} The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism. +U7321H6 discovery-gce {version_qualified} The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism. 
+U7321H6 ingest-attachment {version_qualified} Ingest processor that uses Apache Tika to extract contents +U7321H6 ingest-geoip {version_qualified} Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database +U7321H6 ingest-user-agent {version_qualified} Ingest processor that extracts information from a user agent +U7321H6 mapper-annotated-text {version_qualified} The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index. +U7321H6 mapper-murmur3 {version_qualified} The Mapper Murmur3 plugin allows to compute hashes of a field's values at index-time and to store them in the index. +U7321H6 mapper-size {version_qualified} The Mapper Size plugin allows document to record their uncompressed size at index time. +U7321H6 store-smb {version_qualified} The Store SMB plugin adds support for SMB stores. +U7321H6 transport-nio {version_qualified} The nio transport. ------------------------------------------------------------------------------ // TESTRESPONSE[s/([.()])/\\$1/ s/U7321H6/.+/ _cat] diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index 301c7f7da4998..d9e09f158c494 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -6,6 +6,8 @@ Delete Auto-Follow Pattern ++++ +beta[] + Delete auto-follow patterns. ==== Description diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index 1ff9c9943c9df..ba32a1ee49a67 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -6,6 +6,8 @@ Get Auto-Follow Pattern ++++ +beta[] + Get auto-follow patterns. ==== Description diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index e18b69579d303..f4b53382bda4a 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -6,6 +6,8 @@ Create Auto-Follow Pattern ++++ +beta[] + Creates an auto-follow pattern. ==== Description diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc index 6411766350d34..f0b23c410eae3 100644 --- a/docs/reference/ccr/apis/ccr-apis.asciidoc +++ b/docs/reference/ccr/apis/ccr-apis.asciidoc @@ -3,6 +3,8 @@ [[ccr-apis]] == Cross-cluster replication APIs +beta[] + You can use the following APIs to perform {ccr} operations. [float] diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 0efa156b95a49..155c3430bab31 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -6,6 +6,8 @@ Get Follower Stats ++++ +beta[] + Get follower stats. 
==== Description @@ -111,6 +113,9 @@ The `shards` array consists of objects containing the following fields: `indices[].shards[].follower_mapping_version`:: (long) the mapping version the follower is synced up to +`indices[].shards[].follower_settings_version`:: + (long) the index settings version the follower is synced up to + `indices[].shards[].total_read_time_millis`:: (long) the total time reads were outstanding, measured from the time a read was sent to the leader to the time a reply was returned to the follower @@ -206,6 +211,7 @@ The API returns the following results: "outstanding_write_requests" : 2, "write_buffer_operation_count" : 64, "follower_mapping_version" : 4, + "follower_settings_version" : 2, "total_read_time_millis" : 32768, "total_read_remote_exec_time_millis" : 16384, "successful_read_requests" : 32, @@ -234,6 +240,7 @@ The API returns the following results: // TESTRESPONSE[s/"outstanding_write_requests" : 2/"outstanding_write_requests" : $body.indices.0.shards.0.outstanding_write_requests/] // TESTRESPONSE[s/"write_buffer_operation_count" : 64/"write_buffer_operation_count" : $body.indices.0.shards.0.write_buffer_operation_count/] // TESTRESPONSE[s/"follower_mapping_version" : 4/"follower_mapping_version" : $body.indices.0.shards.0.follower_mapping_version/] +// TESTRESPONSE[s/"follower_settings_version" : 2/"follower_settings_version" : $body.indices.0.shards.0.follower_settings_version/] // TESTRESPONSE[s/"total_read_time_millis" : 32768/"total_read_time_millis" : $body.indices.0.shards.0.total_read_time_millis/] // TESTRESPONSE[s/"total_read_remote_exec_time_millis" : 16384/"total_read_remote_exec_time_millis" : $body.indices.0.shards.0.total_read_remote_exec_time_millis/] // TESTRESPONSE[s/"successful_read_requests" : 32/"successful_read_requests" : $body.indices.0.shards.0.successful_read_requests/] diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index 7fa4dbdd45591..459f13c8a31c2 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -6,6 +6,8 @@ Pause Follower ++++ +beta[] + Pauses a follower index. ==== Description diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index eb19050961be7..2595d4e0a197e 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -6,6 +6,8 @@ Resume Follower ++++ +beta[] + Resumes a follower index. ==== Description diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index d84f170417998..1687b9e7b41d7 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -6,6 +6,8 @@ Unfollow ++++ +beta[] + Converts a follower index to a regular index. ==== Description diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index db0005fe3c983..9e6106a6a774d 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -6,6 +6,8 @@ Create Follower ++++ +beta[] + Creates a follower index. 
==== Description diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 9229e4c9406c5..200ad016b431f 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -6,6 +6,8 @@ Get Follower Stats ++++ +beta[] + Get {ccr} stats. ==== Description diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index daa6f298e5ff7..a0f97a659f279 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -3,4 +3,5 @@ [[ccr-getting-started]] == Getting Started +beta[] This is the getting started section of the {ccr} docs. \ No newline at end of file diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc index 1d5e9445a7b1d..e802286af7cd5 100644 --- a/docs/reference/ccr/index.asciidoc +++ b/docs/reference/ccr/index.asciidoc @@ -6,6 +6,8 @@ [partintro] -- +beta[] + * <> * <> diff --git a/docs/reference/ccr/overview.asciidoc b/docs/reference/ccr/overview.asciidoc index 648a981bc5bdb..c0ca8580f68e0 100644 --- a/docs/reference/ccr/overview.asciidoc +++ b/docs/reference/ccr/overview.asciidoc @@ -3,4 +3,5 @@ [[ccr-overview]] == Overview +beta[] This is the overview section of the {ccr} docs. diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc index 78bccc8bd695d..c38250d31089e 100644 --- a/docs/reference/cluster/stats.asciidoc +++ b/docs/reference/cluster/stats.asciidoc @@ -123,6 +123,12 @@ Will return, for example: "count": 1 } ], + "pretty_names": [ + { + "pretty_name": "Mac OS X", + "count": 1 + } + ], "mem" : { "total" : "16gb", "total_in_bytes" : 17179869184, diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc index 3c8b6c397c07f..510d02ae579d3 100644 --- a/docs/reference/ilm/apis/delete-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -6,15 +6,17 @@ Delete Policy ++++ -Deletes an existing lifecycle policy +Deletes a lifecycle policy. ==== Request -`DELETE _ilm/policy/` +`DELETE _ilm/policy/` ==== Description -Deletes an existing lifecycle policy +Deletes the specified lifecycle policy definition. You cannot delete policies +that are currently in use. If the policy is being used to manage any indices, +the request fails and returns an error. ==== Path Parameters @@ -23,21 +25,15 @@ Deletes an existing lifecycle policy ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - DELETE operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. 
+==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples -The following example deletes an existing policy named `my_policy`: +The following example deletes `my_policy`: ////////////////////////// @@ -77,7 +73,7 @@ DELETE _ilm/policy/my_policy // CONSOLE // TEST[continued] -If the request does not encounter errors, you receive the following result: +When the policy is successfully deleted, you receive the following result: [source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc index 95daf0bda1f6b..31d89a5bf0d7d 100644 --- a/docs/reference/ilm/apis/explain.asciidoc +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="basic"] -[[ilm-explain]] +[[ilm-explain-lifecycle]] === Explain Lifecycle API ++++ Explain Lifecycle ++++ -Shows the current lifecycle status for an index. +Shows an index's current lifecycle status. ==== Request @@ -14,11 +14,10 @@ Shows the current lifecycle status for an index. ==== Description -This API returns information relating to the current lifecycle state of an -index. This includes information such as the currently executing phase, action, -and step and the timestamp when the index entered them. It also shows the -definition of the current phase that is being run and in the event that there -has been a failure, information regarding the failure. +Retrieves information about the index's current lifecycle state, such as +the currently executing phase, action, and step. Shows when the index entered +each one, the definition of the running phase, and information +about any failures. ==== Path Parameters @@ -27,21 +26,15 @@ has been a failure, information regarding the failure. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - GET operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. 
+==== Authorization +include::ilm-index-mgt-privilege.asciidoc[] ==== Examples -The following example retrieves the lifecycle state for the index `my_index`: +The following example retrieves the lifecycle state of `my_index`: ////////////////////////// @@ -91,7 +84,8 @@ GET my_index/_ilm/explain // CONSOLE // TEST[continued] -When the index is first taken over by ILM you will see a response like the following: +When management of the index is first taken over by ILM, `explain` shows +that the index is managed and in the `new` phase: [source,js] -------------------------------------------------- @@ -102,12 +96,12 @@ When the index is first taken over by ILM you will see a response like the follo "managed": true, <1> "policy": "my_policy", <2> "lifecycle_date_millis": 1538475653281, <3> - "phase": "new", <4> - "phase_time_millis": 1538475653317, <5> - "action": "complete", <6> - "action_time_millis": 1538475653317, <7> - "step": "complete", <8> - "step_time_millis": 1538475653317 <9> + "phase": "new", + "phase_time_millis": 1538475653317, <4> + "action": "complete", + "action_time_millis": 1538475653317, <5> + "step": "complete", + "step_time_millis": 1538475653317 <6> } } } @@ -121,17 +115,14 @@ When the index is first taken over by ILM you will see a response like the follo ILM the other fields will not be shown <2> The name of the policy which ILM is using for this index <3> The timestamp used for the `min_age` -<4> The current phase -<5> The timestamp for when the index entered the current phase -<6> The current action -<7> The timestamp for when the index entered the current action -<8> The current step -<9> The timestamp for when the index entered the current step +<4> When the index entered the current phase +<5> When the index entered the current action +<6> When the index entered the current step -When the policy is running on the index the response will contain a -`phase_execution` object that describes the exact phase that is being run. +Once the policy is running on the index, the response includes a +`phase_execution` object that shows the definition of the current phase. Changes to the underlying policy will not affect this index until the current -phase definition has been completely executed. +phase completes. [source,js] -------------------------------------------------- @@ -152,9 +143,9 @@ phase definition has been completely executed. "step": "attempt_rollover", "step_time_millis": 1538475653317, "step_time": "2018-10-15T13:45:22.577Z", - "phase_execution": { <1> - "policy": "my_lifecycle3", <2> - "phase_definition": { <3> + "phase_execution": { + "policy": "my_lifecycle3", + "phase_definition": { <1> "min_age": "0ms", "actions": { "rollover": { @@ -162,9 +153,9 @@ phase definition has been completely executed. } } }, - "version": 3, <4> - "modified_date": "2018-10-15T13:21:41.576Z", <5> - "modified_date_in_millis": 1539609701576 <6> + "version": 3, <2> + "modified_date": "2018-10-15T13:21:41.576Z", <3> + "modified_date_in_millis": 1539609701576 <4> } } } @@ -172,16 +163,14 @@ phase definition has been completely executed. -------------------------------------------------- // CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] -<1> The phase execution information for this index in its current phase -<2> The policy that this phase definition was loaded from -<3> The phase definition itself. 
This is the JSON for the phase loaded from the -policy at the time the index entered the current phase -<4> The version of the policy at the time the phase definition was loaded -<5> The last modified date of the policy at the time the phase definition was loaded -<6> The last modified epoch time of the policy at the time the phase definition was loaded - +<1> The JSON phase definition loaded from the specified policy when the index +entered this phase +<2> The version of the policy that was loaded +<3> The date the loaded policy was last modified +<4> The epoch time when the loaded policy was last modified -If the policy is waiting for a step to complete for the index, the response will contain step information such as: +If {ILM} is waiting for a step to complete, the response includes status +information for the step that's being performed on the index. [source,js] -------------------------------------------------- @@ -236,14 +225,12 @@ If the policy is waiting for a step to complete for the index, the response will -------------------------------------------------- // CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] -<1> `step_info` shows information about what ILM is waiting for on this index. -In this case we are waiting for all shard copies of the index to be active. +<1> Status of the step that's in progress. -If the index is in the ERROR step, something has gone wrong when executing a -step in the policy and will need to be investigated and resolved for the index -to make progress. TO help determine how to resolve the error the explain response -will show the step that failed in `failed_step`, and the information on the error -that occurred in `step_info`. +If the index is in the ERROR step, something went wrong while executing a +step in the policy and you will need to take action for the index to proceed +to the next step. To help you diagnose the problem, the explain response shows +the step that failed and the step info provides information about the error. [source,js] -------------------------------------------------- @@ -291,6 +278,5 @@ that occurred in `step_info`. -------------------------------------------------- // CONSOLE // TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] -<1> The step that caused an error -<2> Information on the error that occurred. In this case the next index already -existed when the rollover operation was performed +<1> The step that caused the error +<2> What went wrong diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc index dbc8a572903b3..4b9b2a3bd3b60 100644 --- a/docs/reference/ilm/apis/get-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -6,18 +6,17 @@ Get Policy ++++ -Retrieves an existing policy +Retrieves a lifecycle policy. ==== Request `GET _ilm/policy` -`GET _ilm/policy/` +`GET _ilm/policy/` ==== Description -This API returns a policy definition along with some of its metadata like -its last modified date and version. If no path parameters are provided, then -all the policies defined will be returned. +Returns the specified policy definition. Includes the policy version and last +modified date. If no policy is specified, returns all defined policies. ==== Path Parameters @@ -26,21 +25,15 @@ all the policies defined will be returned. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - GET operation.
When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples -The following example retrieves the policy named `my_policy`: +The following example retrieves `my_policy`: ////////////////////////// @@ -80,7 +73,8 @@ GET _ilm/policy // CONSOLE // TEST[continued] -If the request does not encounter errors, you receive the following result: + +If the request succeeds, the body of the response contains the policy definition: [source,js] -------------------------------------------------- @@ -111,5 +105,5 @@ If the request does not encounter errors, you receive the following result: -------------------------------------------------- // CONSOLE // TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/] -<1> The version of the policy. This is increased whenever the policy is updated -<2> The timestamp when this policy was last modified +<1> The policy version is incremented whenever the policy is updated +<2> When this policy was last modified diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc index 8f5d2289ff2ea..4406b805fb5d9 100644 --- a/docs/reference/ilm/apis/get-status.asciidoc +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="basic"] [[ilm-get-status]] -=== Get ILM Status API +=== Get {ILM} Status API ++++ -Get ILM Status +Get {ILM} Status ++++ -Gets the current status for ILM. +Retrieves the current {ilm} status. ==== Request @@ -14,28 +14,22 @@ Gets the current status for ILM. ==== Description -This API will return the current status of the ILM plugin. The response contains -a `operation_mode` field which shows whether the ILM plugin is `STARTED`, `STOPPING` -or `STOPPED`. This `operation_mode` is controlled by the <> -and <> APIs. +Returns the status of the {ILM} plugin. The `operation_mode` field in the +response shows one of three states: `STARTED`, `STOPPING`, +or `STOPPED`. You can change the status of the {ILM} plugin with the +<> and <> APIs. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - get operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples -The following example stops the ILM plugin. +The following example gets the {ILM} plugin status. 
[source,js] -------------------------------------------------- @@ -43,7 +37,7 @@ GET _ilm/status -------------------------------------------------- // CONSOLE -If the request does not encounter errors, you receive the following result: +If the request succeeds, the body of the response shows the operation mode: [source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc index 49c7d2155d516..351ff44f41fe2 100644 --- a/docs/reference/ilm/apis/ilm-api.asciidoc +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -25,7 +25,7 @@ You can use the following APIs to manage policies on indices. * <> * <> * <> -* <> +* <> include::put-lifecycle.asciidoc[] diff --git a/docs/reference/ilm/apis/ilm-cluster-mgt-privilege.asciidoc b/docs/reference/ilm/apis/ilm-cluster-mgt-privilege.asciidoc new file mode 100644 index 0000000000000..eef1433a9c055 --- /dev/null +++ b/docs/reference/ilm/apis/ilm-cluster-mgt-privilege.asciidoc @@ -0,0 +1,2 @@ +You must have the cluster `manage` privilege to use this API. +For more information, see {stack-ov}/security-privileges.html[Security Privileges]. diff --git a/docs/reference/ilm/apis/ilm-index-mgt-privilege.asciidoc b/docs/reference/ilm/apis/ilm-index-mgt-privilege.asciidoc new file mode 100644 index 0000000000000..c2a3e3fddb5c0 --- /dev/null +++ b/docs/reference/ilm/apis/ilm-index-mgt-privilege.asciidoc @@ -0,0 +1,2 @@ +You must have the `manage` privilege on the indices being managed to use this API. +For more information, see {stack-ov}/security-privileges.html[Security Privileges]. diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc index c34b800856c10..854b1ab317d1b 100644 --- a/docs/reference/ilm/apis/move-to-step.asciidoc +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="basic"] [[ilm-move-to-step]] -=== Move To Step API +=== Move to Lifecycle Step API ++++ -Move To Step +Move to Step ++++ -Moves a managed index into a specific execution step its policy +Triggers execution of a specific step in the lifecycle policy. ==== Request @@ -14,13 +14,18 @@ Moves a managed index into a specific execution step its policy ==== Description -WARNING: This is an expert API that may lead to unintended data loss. When used, -an index's policy will begin executing at the specified step. It will execute -the step specified even if it has already executed it. Since this is a, potentionally, -dangerous action, specifying both the current step and next step to move to is -required in the body of the request. +WARNING: This operation can result in the loss of data. Manually moving an index +into a specific step executes that step even if it has already been performed. +This is a potentially destructive action and should be considered an +expert-level API. -This API changes the current step for the specified index to the step supplied in the body of the request +Manually moves an index into the specified step and executes that step. +You must specify both the current step and the step to be executed in the +body of the request. + +The request will fail if the current step does not match the step currently +being executed for the index. This is to prevent the index from being moved from +an unexpected step into the next step.
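For orientation, here is a minimal sketch of the request body shape, assuming each step is identified by its `phase`, `action`, and step `name` as referenced in the Examples section; the step values shown are illustrative only:

[source,js]
--------------------------------------------------
POST _ilm/move/my_index
{
  "current_step": {
    "phase": "new",
    "action": "complete",
    "name": "complete"
  },
  "next_step": {
    "phase": "warm",
    "action": "forcemerge",
    "name": "forcemerge"
  }
}
--------------------------------------------------
// NOTCONSOLE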
==== Path Parameters @@ -29,22 +34,16 @@ This API changes the current step for the specified index to the step supplied i ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - move operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-index-mgt-privilege.asciidoc[] ==== Examples -The following example moves the index `my_index` from the initial step to the -forcemerge step: +The following example moves `my_index` from the initial step to the +`forcemerge` step: ////////////////////////// @@ -102,10 +101,10 @@ POST _ilm/move/my_index -------------------------------------------------- // CONSOLE // TEST[continued] -<1> The step that the index is currently expected to be executing -<2> The step that the index should move to when executing this request +<1> The step that the index is expected to be in +<2> The step that you want to execute -If the request does not encounter errors, you receive the following result: +If the request succeeds, you receive the following result: [source,js] -------------------------------------------------- @@ -116,6 +115,5 @@ If the request does not encounter errors, you receive the following result: // CONSOLE // TESTRESPONSE -NOTE: An error will be returned if the index is now longer executing the step -specified in `current_step`. This is so the index is not moved from an -unexpected step into the `next_step`. +The request will fail if the index is not in the `new` phase as specified +by the `current_step`. diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc index 36650078db652..565598e75cdd8 100644 --- a/docs/reference/ilm/apis/put-lifecycle.asciidoc +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -1,25 +1,24 @@ [role="xpack"] [testenv="basic"] [[ilm-put-lifecycle]] -=== Put Lifecycle Policy API +=== Create Lifecycle Policy API ++++ -Put Policy +Create Policy ++++ -Creates or updates an ILM Policy +Creates or updates a lifecycle policy. ==== Request -`PUT _ilm/policy/` +`PUT _ilm/policy/` ==== Description -This API creates a new Lifecycle Policy, or updates an existing one with the same -identifier. Each call will replace the existing policy and increment the `version` -associated with the policy. +Creates a lifecycle policy. If the specified policy exists, the policy is +replaced and the policy version is incremented. -NOTE: The `version` is only for informational purposes. Only the latest version -of the policy is stored. +NOTE: Only the latest version of the policy is stored; you cannot revert to +previous versions. ==== Path Parameters @@ -28,17 +27,11 @@ of the policy is stored. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - PUT operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>.
+include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples @@ -71,7 +64,7 @@ PUT _ilm/policy/my_policy // CONSOLE // TEST -If the request does not encounter errors, you receive the following result: +If the request succeeds, you receive the following result: [source,js] ---- { diff --git a/docs/reference/ilm/apis/remove-policy.asciidoc b/docs/reference/ilm/apis/remove-policy.asciidoc index 8ee313f4e30c3..2811bc476e623 100644 --- a/docs/reference/ilm/apis/remove-policy.asciidoc +++ b/docs/reference/ilm/apis/remove-policy.asciidoc @@ -1,21 +1,22 @@ [role="xpack"] [testenv="basic"] -[[ilm-remove-policy]] -=== Remove Policy On Index API +[[ilm-delete-policy]] +=== Delete Policy from Index API ++++ -Remove Policy From Index +Delete Policy ++++ -Unassigns a policy from a specified index pattern +Removes the assigned lifecycle policy from an index. ==== Request -`DELETE /_ilm` +`POST /_ilm/remove` ==== Description -This action removes a policy from managing an index. It is effectively the same as setting an index's -`index.lifecycle.name` setting to null. +Removes the assigned lifecycle policy and stops managing the specified index. +If an index pattern is specified, removes the assigned policies from all matching +indices. ==== Path Parameters @@ -24,21 +25,15 @@ This action removes a policy from managing an index. It is effectively the same ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples -The following example removes a policy `my_policy` from an index `my_index`. +The following example removes the assigned policy from `my_index`. ////////////////////////// @@ -80,12 +75,12 @@ PUT my_index [source,js] -------------------------------------------------- -DELETE my_index/_ilm +POST my_index/_ilm/remove -------------------------------------------------- // CONSOLE // TEST[continued] -If the request does not encounter errors, you receive the following result: +If the request succeeds, you receive the following result: [source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc index 7c81f9423ef12..51882c543e283 100644 --- a/docs/reference/ilm/apis/retry-policy.asciidoc +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -3,10 +3,10 @@ [[ilm-retry-policy]] === Retry Policy Execution API ++++ -Retry Policy Execution +Retry Policy ++++ -Retry executing the policy for an index which has errored. +Retry executing the policy for an index that is in the ERROR step. ==== Request @@ -14,10 +14,9 @@ Retry executing the policy for an index which has errored. 
==== Description -This API will re-run a policy is currently in the ERROR step. It will set the -policy back to the step where the error occurred and attempt to re-execute it. -Information on whether an index is in the ERROR step can be obtained from the -<> +Sets the policy back to the step where the error occurred and executes the step. +Use the <> to determine if an index is in the ERROR +step. ==== Path Parameters @@ -26,21 +25,15 @@ Information on whether an index is in the ERROR step can be obtained from the ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - retry operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-index-mgt-privilege.asciidoc[] ==== Examples -The following example retries the policy for index `my_index`. +The following example retries the policy for `my_index`. [source,js] -------------------------------------------------- @@ -48,7 +41,7 @@ POST my_index/_ilm/retry -------------------------------------------------- // NOTCONSOLE -If the request does not encounter errors, you receive the following result: +If the request succeeds, you receive the following result: [source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc index 073a584e4d872..6f2dffe9c3398 100644 --- a/docs/reference/ilm/apis/start.asciidoc +++ b/docs/reference/ilm/apis/start.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="basic"] [[ilm-start]] -=== Start ILM API +=== Start {ILM} API ++++ -Start ILM +Start {ILM} ++++ -Start the ILM plugin +Start the {ILM} plugin. ==== Request @@ -14,23 +14,17 @@ Start the ILM plugin ==== Description -This API will start the ILM plugin if it is currently stopped. ILM is started -by default when the cluster is formed so this API is only needed if ILM has -been stopped using the <>. +Starts the {ILM} plugin if it is currently stopped. {ILM} is started +automatically when the cluster is formed. Restarting {ILM} is only +necessary if it has been stopped using the <>. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the completion of the - start operation. When this period of time elapses, the API fails and returns - an error. The default value is `30s`. For more information about time units, - see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. 
+==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples @@ -78,7 +72,7 @@ POST _ilm/start // CONSOLE // TEST[continued] -If the request does not encounter errors, you receive the following result: +If the request succeeds, you receive the following result: [source,js] -------------------------------------------------- diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc index cdc038adabcfc..f1ffbc09c4461 100644 --- a/docs/reference/ilm/apis/stop.asciidoc +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -1,12 +1,12 @@ [role="xpack"] [testenv="basic"] [[ilm-stop]] -=== Stop ILM API +=== Stop {ILM} API ++++ -Stop ILM +Stop {ILM} ++++ -Stop the ILM plugin. +Stop the {ILM} plugin. ==== Request @@ -14,25 +14,22 @@ Stop the ILM plugin. ==== Description -This API will stop the ILM plugin. This can be used for period where -maintenance is required and ILM should not perform any actions on any indices. -The API will return as soon as the stop request has been acknowledged but the -plugin may not immediately stop but rather need to wait for some operations -to finish before it's stopped. Progress can be seen using the -<> API. +Halts all lifecycle management operations and stops the {ILM} plugin. This is +useful when you are performing maintenance on the cluster and need to prevent +{ILM} from performing any actions on your indices. + +The API returns as soon as the stop request has been acknowledged, but the +plugin might continue to run until in-progress operations complete and the plugin +can be safely stopped. Use the <> API to see +if {ILM} is running. ==== Request Parameters -`timeout`:: - (time units) Specifies the period of time to wait for the response. When this - period of time elapses, the API fails and returns an error. The default value - is `30s`. For more information about time units, see <>. +include::{docdir}/rest-api/timeoutparms.asciidoc[] -`master_timeout`:: - (time units) Specifies the period of time to wait for the connection with master. - When this period of time elapses, the API fails and returns an error. - The default value is `30s`. For more information about time units, see <>. +==== Authorization +include::ilm-cluster-mgt-privilege.asciidoc[] ==== Examples diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc index d85f92fb1c28a..be96689555676 100644 --- a/docs/reference/ilm/index.asciidoc +++ b/docs/reference/ilm/index.asciidoc @@ -1,13 +1,14 @@ [role="xpack"] [testenv="basic"] [[index-lifecycle-management]] -= Managing Indices += Managing the index lifecycle :ilm: index lifecycle management -:ILM: Index lifecycle management +:Ilm: Index lifecycle management +:ILM: ILM [partintro] -- -The <> enable you to automate how you +The <> enable you to automate how you want to manage your indices over time. Rather than simply performing management actions on your indices on a set schedule, you can base actions on other factors such as shard size and performance requirements. 
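For example, a minimal policy sketch that rolls an index over once it passes a size threshold and deletes it after a retention period might look like the following; the policy name and the thresholds are illustrative, not recommendations:

[source,js]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_size": "50gb"
          }
        }
      },
      "delete": {
        "min_age": "30d",
        "actions": {
          "delete": {}
        }
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE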
diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc index 334b5a953fd0e..97a3131241e22 100644 --- a/docs/reference/ilm/update-lifecycle-policy.asciidoc +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -1,9 +1,12 @@ [role="xpack"] [testenv="basic"] [[update-lifecycle-policy]] -== Update lifecycle policy +== Update Lifecycle Policy +++++ +Update Policy +++++ -Updating existing ILM policies is useful to fix mistakes or change +You can update an existing lifecycle policy to fix mistakes or change strategies for newly created indices. It is possible to update policy definitions and an index's `index.lifecycle.name` settings independently. To prevent the situation that phase definitions are modified while currently being executed on an index, each index @@ -173,7 +176,7 @@ PUT my_index // TEST[continued] //// -The <> is useful to introspect managed indices to see which phase definition they are currently executing. +The <> is useful to introspect managed indices to see which phase definition they are currently executing. Using this API, we can find out that `my_index` is currently attempting to be rolled over. [source,js] diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 070892d6f02e7..1a09627741a64 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -828,7 +828,7 @@ include::ingest-node-common-processor.asciidoc[] [[convert-processor]] === Convert Processor -Converts an existing field's value to a different type, such as converting a string to an integer. +Converts a field in the currently ingested document to a different type, such as converting a string to an integer. If the field value is an array, all members will be converted. The supported types include: `integer`, `long`, `float`, `double`, `string`, `boolean`, and `auto`. @@ -857,11 +857,17 @@ include::ingest-node-common-processor.asciidoc[] [source,js] -------------------------------------------------- +PUT _ingest/pipeline/my-pipeline-id { - "convert": { - "field" : "url.port", - "type": "integer" - } + "description": "converts the content of the id field to an integer", + "processors" : [ + { + "convert" : { + "field" : "id", + "type": "integer" + } + } + ] } -------------------------------------------------- // NOTCONSOLE diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 95881ba83856f..75690875e0eb8 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -495,12 +495,12 @@ The above call returns [float] ==== Document APIs -Index APIs must be call with the `{index}/_doc` path for automatic generation of +Index APIs must be called with the `{index}/_doc` path for automatic generation of the `_id` and `{index}/_doc/{id}` with explicit ids. [source,js] -------------------------------------------------- -PUT index/_doc/1?include_type_name=false +PUT index/_doc/1 { "foo": "bar" } @@ -512,6 +512,7 @@ PUT index/_doc/1?include_type_name=false { "_index": "index", <1> "_id": "1", + "_type": "_doc", "_version": 1, "result": "created", "_shards": { @@ -526,6 +527,6 @@ PUT index/_doc/1?include_type_name=false // TESTRESPONSE <1> The response does not include a `_type`. -Likewise the <>, <>, -<> and <> APIs do not return a `_type` -key in the response when `include_type_name` is set to `false`. 
+The <>, <>, <> and <> APIs +will continue to return a `_type` key in the response in 7.0, but it is considered deprecated and will be +removed in 8.0. diff --git a/docs/reference/migration/migrate_7_0/plugins.asciidoc b/docs/reference/migration/migrate_7_0/plugins.asciidoc index 5fcd2bb95261c..f63b0892f3897 100644 --- a/docs/reference/migration/migrate_7_0/plugins.asciidoc +++ b/docs/reference/migration/migrate_7_0/plugins.asciidoc @@ -22,6 +22,13 @@ must now be specified in the client settings instead. See {plugins}/repository-gcs-client.html#repository-gcs-client[Google Cloud Storage Client Settings]. +[float] +==== S3 Repository Plugin + +* The plugin now uses the path style access pattern for all requests. +In previous versions it automatically determined whether to use virtual hosted style or path style +access. + [float] ==== Analysis Plugin changes @@ -36,3 +43,24 @@ Elasticsearch and requires no plugin. The location of the hosts file has moved from `$ES_PATH_CONF/file-discovery/unicast_hosts.txt` to `$ES_PATH_CONF/unicast_hosts.txt`. See <> for further information. + +[float] +==== Security Extensions + +As a consequence of the <>, +the `getRealmSettings` method has been removed from the `SecurityExtension` class, +and the `settings` method on `RealmConfig` now returns the node's (global) settings. +Custom security extensions should register their settings by implementing the standard +`Plugin.getSettings` method, and can retrieve them from `RealmConfig.settings()` or +using one of the `RealmConfig.getSetting` methods. +Each realm setting should be defined as an `AffixSetting` as shown in the example below: +[source,java] +-------------------------------------------------- +Setting.AffixSetting MY_SETTING = Setting.affixKeySetting( + "xpack.security.authc.realms." + MY_REALM_TYPE + ".", "my_setting", + key -> Setting.simpleString(key, properties) +); +-------------------------------------------------- + +The `RealmSettings.simpleString` method can be used as a convenience for the above. + diff --git a/docs/reference/migration/migrate_7_0/settings.asciidoc b/docs/reference/migration/migrate_7_0/settings.asciidoc index 7eab006efdc30..d983994b0c517 100644 --- a/docs/reference/migration/migrate_7_0/settings.asciidoc +++ b/docs/reference/migration/migrate_7_0/settings.asciidoc @@ -67,6 +67,26 @@ in the cluster state, or set on dynamic settings updates, we will automatically upgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback settings will be removed in 8.0.0. +[float] +[[audit-logfile-local-node-info]] +==== Audit logfile local node info + +The following settings have been removed: + +- `xpack.security.audit.logfile.prefix.emit_node_host_address`, instead use + `xpack.security.audit.logfile.emit_node_host_address` +- `xpack.security.audit.logfile.prefix.emit_node_host_name`, instead use + `xpack.security.audit.logfile.emit_node_host_name` +- `xpack.security.audit.logfile.prefix.emit_node_name`, instead use + `xpack.security.audit.logfile.emit_node_name` + +The new settings have the same meaning as the removed ones, but the `prefix` +name component is no longer meaningful as logfile audit entries are structured +JSON documents and are not prefixed by anything. +Moreover, `xpack.security.audit.logfile.emit_node_name` has changed its default +from `true` to `false`. All other settings mentioned before have kept their +default value of `false`.
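As a migration sketch, a node that previously used the `prefix` variants would now set only the renamed keys in `elasticsearch.yml`; the values shown here are illustrative:

[source,yaml]
--------------------------------------------------
xpack.security.audit.logfile.emit_node_host_address: false
xpack.security.audit.logfile.emit_node_host_name: false
xpack.security.audit.logfile.emit_node_name: true
--------------------------------------------------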
+ [float] [[include-realm-type-in-setting]] ==== Security realms settings @@ -84,7 +104,7 @@ xpack.security.authc.realms: url: "ldaps://ldap.example.com/" -------------------------------------------------- -Must be migrated to: +Must be migrated to: [source,yaml] -------------------------------------------------- xpack.security.authc.realms: diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 34cbee9c1699f..d7bd58eb33f29 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -15,9 +15,15 @@ To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. . Enable the collection of monitoring data. Set -`xpack.monitoring.collection.enabled` to `true` on the production cluster. + +`xpack.monitoring.collection.enabled` to `true` on each node in the production +cluster. By default, it is disabled (`false`). + -- +NOTE: You can specify this setting in either the `elasticsearch.yml` on each +node or across the cluster as a dynamic cluster setting. If {es} +{security-features} are enabled, you must have `monitor` cluster privileges to +view the cluster settings and `manage` cluster privileges to change them. + For example, you can use the following APIs to review and change this setting: [source,js] @@ -31,16 +37,21 @@ PUT _cluster/settings } } ---------------------------------- -// CONSOLE +// CONSOLE For more information, see <> and <>. -- . Disable the default collection of {es} monitoring metrics. Set -`xpack.monitoring.elasticsearch.collection.enabled` to `false` on the production -cluster. + +`xpack.monitoring.elasticsearch.collection.enabled` to `false` on each node in +the production cluster. + -- +NOTE: You can specify this setting in either the `elasticsearch.yml` on each +node or across the cluster as a dynamic cluster setting. If {es} +{security-features} are enabled, you must have `monitor` cluster privileges to +view the cluster settings and `manage` cluster privileges to change them. + For example, you can use the following API to change this setting: [source,js] @@ -96,30 +107,26 @@ You must specify the following settings in the `modules.d/elasticsearch.yml` fil - shard period: 10s hosts: ["http://localhost:9200"] <1> - xpack.enabled: true + xpack.enabled: true <2> ---------------------------------- -<1> This setting identifies the host and port number that are used to access {es}. +<1> This setting identifies the host and port number that are used to access {es}. +<2> This setting ensures that {kib} can read this monitoring data successfully. +That is to say, it's stored in the same location and format as monitoring data +that is sent by <>. -- -.. If {security} is enabled, you must also provide a user ID and password so that -{metricbeat} can collect metrics successfully. +.. If Elastic {security-features} are enabled, you must also provide a user ID +and password so that {metricbeat} can collect metrics successfully. -... Create or identify a user that you want to use to collect the metrics. -+ --- -TIP: There is a `remote_monitoring_user` built-in user that grants the privileges -necessary for {metricbeat} to monitor {stack} products. See -{stack-ov}/built-in-users.html[Built-in users]. - -Alternatively, you can choose a different user and give them the -`remote_monitoring_collector` {stack-ov}/built-in-roles.html[built-in role]. --- +...
Create a user on the production cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. +Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. ... Add the `username` and `password` settings to the {es} module configuration file. + -- -For example, add the following settings in the `modules.d/kibana.yml` file: +For example, add the following settings in the `modules.d/elasticsearch.yml` file: [source,yaml] ---------------------------------- @@ -158,19 +165,14 @@ For more information about these configuration options, see {metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. -- -.. If {security} is enabled on the monitoring cluster, you must provide a valid -user ID and password so that {metricbeat} can send metrics successfully. +.. If {es} {security-features} are enabled on the monitoring cluster, you +must provide a valid user ID and password so that {metricbeat} can send metrics +successfully. -... Create or identify a user that you want to use to send the metrics. -+ --- -TIP: There is a `remote_monitoring_user` built-in user that grants the privileges -necessary for {metricbeat} to monitor {stack} products. See -{stack-ov}/built-in-users.html[Built-in users]. - -Alternatively, you can choose a different user and give them the -`remote_monitoring_agent` {stack-ov}/built-in-roles.html[built-in role]. --- +... Create a user on the monitoring cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. +Alternatively, use the +{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. ... Add the `username` and `password` settings to the {es} output information in the {metricbeat} configuration file (`metricbeat.yml`): diff --git a/docs/reference/monitoring/configuring-monitoring.asciidoc b/docs/reference/monitoring/configuring-monitoring.asciidoc index 81a9cce4f12ec..e7ed3a4539739 100644 --- a/docs/reference/monitoring/configuring-monitoring.asciidoc +++ b/docs/reference/monitoring/configuring-monitoring.asciidoc @@ -6,8 +6,8 @@ Configuring monitoring ++++ -If you enable the collection of monitoring data in your cluster, you can -optionally collect metrics about {es}. By default, {monitoring} is enabled but +If you enable the Elastic {monitor-features} in your cluster, you can +optionally collect metrics about {es}. By default, monitoring is enabled but data collection is disabled. The following method involves sending the metrics to the monitoring cluster by @@ -20,15 +20,51 @@ monitoring indices. You can also adjust how monitoring data is displayed. To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. -. To collect monitoring data about your {es} cluster: +. Configure your cluster to collect monitoring data: -.. Verify that the `xpack.monitoring.enabled`, -`xpack.monitoring.collection.enabled`, and -`xpack.monitoring.elasticsearch.collection.enabled` settings are `true` on each -node in the cluster. By default `xpack.monitoring.collection.enabled` is disabled -(`false`), and that overrides `xpack.monitoring.elasticsearch.collection.enabled`, -which defaults to being enabled (`true`). Both settings can be set dynamically -at runtime. For more information, see <>. +.. Verify that the `xpack.monitoring.enabled` setting is `true`, which is its +default value, on each node in the cluster. For more information, see +<>. + +.. 
Verify that the `xpack.monitoring.elasticsearch.collection.enabled` setting +is `true`, which is its default value, on each node in the cluster. ++ +-- +NOTE: You can specify this setting in either the `elasticsearch.yml` on each +node or across the cluster as a dynamic cluster setting. If {es} +{security-features} are enabled, you must have `monitor` cluster privileges to +view the cluster settings and `manage` cluster privileges to change them. + +For more information, see <> and <>. +-- + +.. Set the `xpack.monitoring.collection.enabled` setting to `true` on each +node in the cluster. By default, it is disabled (`false`). ++ +-- +NOTE: You can specify this setting in either the `elasticsearch.yml` on each +node or across the cluster as a dynamic cluster setting. If {es} +{security-features} are enabled, you must have `monitor` cluster privileges to +view the cluster settings and `manage` cluster privileges to change them. + +For example, use the following APIs to review and change this setting: + +[source,js] +---------------------------------- +GET _cluster/settings + +PUT _cluster/settings +{ + "persistent": { + "xpack.monitoring.collection.enabled": true + } +} +---------------------------------- +// CONSOLE + +For more +information, see <> and <>. +-- .. Optional: Specify which indices you want to monitor. + @@ -53,62 +89,71 @@ patterns. For example, to include all indices that start with `test` except the `xpack.monitoring.collection.interval` setting 10 seconds. See <>. -. Optional: Configure your cluster to route monitoring data from sources such -as {kib}, Beats, and Logstash to a monitoring cluster: - -.. Verify that `xpack.monitoring.collection.enabled` settings are `true` on each -node in the cluster. - -.. {stack-ov}/xpack-monitoring.html[Configure {monitoring} across the Elastic Stack]. - . Identify where to store monitoring data. + -- -By default, {monitoring} uses a `local` exporter that indexes monitoring data -on the same cluster. See <> and <>. +By default, the data is stored on the same cluster by using a +<>. -Alternatively, you can use an `http` exporter to send data to a separate -monitoring cluster. See <>. +Alternatively, you can use an <> to send data to +a separate _monitoring cluster_. For more information about typical monitoring architectures, see {stack-ov}/how-monitoring-works.html[How Monitoring Works]. -- -. If {security} is enabled and you are using an `http` exporter to send data to - a dedicated monitoring cluster: +. If you choose to use an `http` exporter: -.. Create a user on the monitoring cluster that has the -{stack-ov}/built-in-roles.html#built-in-roles-remote-monitoring-agent[`remote_monitoring_agent` built-in role]. -For example, the following request creates a `remote_monitor` user that has the -`remote_monitoring_agent` role: +.. On the cluster that you want to monitor (often called the _production cluster_), +configure each node to send metrics to your monitoring cluster. Configure an +HTTP exporter in the `xpack.monitoring.exporters` settings in the +`elasticsearch.yml` file.
For example: + -- -[source, sh] ---------------------------------------------------------------- -POST /_xpack/security/user/remote_monitor -{ - "password" : "changeme", - "roles" : [ "remote_monitoring_agent"], - "full_name" : "Internal Agent For Remote Monitoring" -} ---------------------------------------------------------------- -// CONSOLE -// TEST[skip:needs-gold+-license] +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["http://es-mon-1:9200", "http://es-mon2:9200"] +-------------------------------------------------- -- -.. On each node in the cluster that is being monitored, configure the `http` -exporter to use the appropriate credentials when data is shipped to the -monitoring cluster. +.. If the Elastic {security-features} are enabled on the monitoring cluster, you +must provide appropriate credentials when data is shipped to the monitoring cluster: + +... Create a user on the monitoring cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. +Alternatively, use the +{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. + +... Add the user ID and password settings to the HTTP exporter settings in the +`elasticsearch.yml` file on each node. + + -- -If SSL/TLS is enabled on the monitoring cluster, you must use the HTTPS protocol -in the `host` setting. You must also include the CA certificate in each node's -trusted certificates in order to verify the identities of the nodes in the -monitoring cluster. +For example: -The following example specifies the location of the PEM encoded certificate with -the `certificate_authorities` setting: +[source,yaml] +-------------------------------------------------- +xpack.monitoring.exporters: + id1: + type: http + host: ["http://es-mon-1:9200", "http://es-mon2:9200"] + auth.username: remote_monitoring_user + auth.password: YOUR_PASSWORD +-------------------------------------------------- +-- + +.. If you configured the monitoring cluster to use +<>, you must use the HTTPS protocol in +the `host` setting. You must also specify the trusted CA certificates that will +be used to verify the identity of the nodes in the monitoring cluster. +*** To add a CA certificate to an {es} node's trusted certificates, you can +specify the location of the PEM encoded certificate with the +`certificate_authorities` setting. For example: ++ +-- [source,yaml] -------------------------------------------------- xpack.monitoring.exporters: @@ -116,18 +161,17 @@ xpack.monitoring.exporters: type: http host: ["https://es-mon1:9200", "https://es-mon2:9200"] auth: - username: remote_monitor <1> - password: changeme + username: remote_monitoring_user + password: YOUR_PASSWORD ssl: certificate_authorities: [ "/path/to/ca.crt" ] - id2: - type: local -------------------------------------------------- -<1> The `username` and `password` parameters provide the user credentials. - -Alternatively, you can configure trusted certificates using a truststore -(a Java Keystore file that contains the certificates): +-- +*** Alternatively, you can configure trusted certificates using a truststore +(a Java Keystore file that contains the certificates). 
For example: ++ +-- [source,yaml] -------------------------------------------------- xpack.monitoring.exporters: @@ -135,25 +179,28 @@ xpack.monitoring.exporters: type: http host: ["https://es-mon1:9200", "https://es-mon2:9200"] auth: - username: remote_monitor - password: changeme + username: remote_monitoring_user + password: YOUR_PASSWORD ssl: truststore.path: /path/to/file truststore.password: password - id2: - type: local -------------------------------------------------- -- -. If {security} is enabled and you want to visualize monitoring data in {kib}, -you must create users that have access to the {kib} indices and permission to -read from the monitoring indices. +. Configure your cluster to route monitoring data from sources such as {kib}, +Beats, and {ls} to the monitoring cluster. The +`xpack.monitoring.collection.enabled` setting must be `true` on each node in the +cluster. For information about configuring each product to collect and send +monitoring data, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. + +. If you updated settings in the `elasticsearch.yml` files on your production +cluster, restart {es}. See <> and <>. + -- -You set up {monitoring} UI users on the cluster where the monitoring data is -stored, that is to say the monitoring cluster. To grant all of the necessary permissions, assign users the -`monitoring_user` and `kibana_user` roles. For more information, see -{stack-ov}/mapping-roles.html[Mapping users and groups to roles]. +TIP: You may want to temporarily {ref}/modules-cluster.html[disable shard +allocation] before you restart your nodes to avoid unnecessary shard +reallocation during the install process. + -- . Optional: diff --git a/docs/reference/monitoring/index.asciidoc b/docs/reference/monitoring/index.asciidoc index d6a55f44585dd..13e7314f8af5f 100644 --- a/docs/reference/monitoring/index.asciidoc +++ b/docs/reference/monitoring/index.asciidoc @@ -5,38 +5,46 @@ [partintro] -- -{monitoring} enables you to easily monitor the health of your {es} cluster. The -monitoring metrics are collected from each node and stored in {es} indices. +The Elastic {monitor-features} enable you to easily monitor the health of +your {es} cluster. The monitoring metrics are collected from each node and +stored in {es} indices. + +TIP: In production environments, it is recommended to store the monitoring data +in a separate _monitoring cluster_. See +{stack-ov}/monitoring-production.html[Monitoring in a production environment]. Each {es} node is considered unique based on its persistent UUID, which is written on first start to its <> directory, which defaults to `./data`. -All settings associated with {monitoring} in {es} must be set in either the +All settings associated with monitoring in {es} must be set in either the `elasticsearch.yml` file for each node or, where possible, in the dynamic cluster settings. For more information, see <>. [[es-monitoring-overview]] -{es} is also at the core of {monitoring} across the Elastic Stack. In all cases, -{monitoring} documents are just ordinary JSON documents built by monitoring each -Elastic Stack component at some collection interval, then indexing those -documents into the monitoring cluster. Each component in the stack is -responsible for monitoring itself and then forwarding those documents to {es} -for both routing and indexing (storage). - -The routing and indexing processes in {es} are handled by what are called -<> and -<>. 
In the past, collectors and exporters -were considered to be part of a monitoring "agent", but that term is generally -not used anymore. +{es} is also at the core of monitoring across the {stack}. In all cases, +monitoring documents are just ordinary JSON documents built by monitoring each +{stack} component at some collection interval, then indexing those +documents into the monitoring cluster. + +Each component in the stack is responsible for monitoring itself and then +forwarding those documents to the {es} production cluster for both routing and +indexing (storage). The routing and indexing processes in {es} are handled by +what are called <> and +<>. + +beta[] Alternatively, in 6.4 and later, you can use {metricbeat} to collect +monitoring data about {kib} and ship it directly to the monitoring cluster, +rather than routing it through the production cluster. In 6.5 and later, you +can also use {metricbeat} to collect and ship data about {es}. You can view monitoring data from {kib} where it’s easy to spot issues at a glance or delve into the system behavior over time to diagnose operational issues. In addition to the built-in status warnings, you can also set up custom alerts based on the data in the monitoring indices. -For an introduction to monitoring your Elastic stack, including Beats, Logstash, -and {kib}, see {xpack-ref}/xpack-monitoring.html[Monitoring the Elastic Stack]. +For an introduction to monitoring your {stack}, including Beats, {ls}, and {kib}, +see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. -- diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 296689db289d4..512eee4900b41 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -63,7 +63,9 @@ index settings, which in turn defaults to `*`. `*` extracts all fields in the ma are eligible to term queries and filters the metadata fields. All extracted fields are then combined to build a query. -WARNING: There is a limit of no more than 1024 fields being queried at once. +WARNING: There is a limit on the number of fields that can be queried +at once. It is defined by the `indices.query.bool.max_clause_count` <> +which defaults to 1024. [[multi-match-types]] [float] diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index 08465278a67dd..f80cd2e8e9358 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -60,8 +60,11 @@ The `query_string` top level parameters include: specified. Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. `*` extracts all fields in the mapping that are eligible to term queries and filters the metadata fields. All extracted fields are then -combined to build a query when no prefix field is provided. There is a limit of -no more than 1024 fields being queried at once. +combined to build a query when no prefix field is provided. + +WARNING: There is a limit on the number of fields that can be queried +at once. It is defined by the `indices.query.bool.max_clause_count` <> +which defaults to 1024. |`default_operator` |The default operator used if no explicit operator is specified. 
For example, with a default operator of `OR`, the query diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index 620a175ff39a5..c5087d52f905e 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -139,3 +139,26 @@ GET _search // CONSOLE <1> This date will be converted to `2014-12-31T23:00:00 UTC`. <2> `now` is not affected by the `time_zone` parameter (dates must be stored as UTC). + +[[querying-range-fields]] +==== Querying range fields + +`range` queries can be used on fields of type <>, allowing you to +match a range specified in the query with a range field value in the document. +The `relation` parameter controls how these two ranges are matched: + +[horizontal] +`WITHIN`:: + + Matches documents whose range field is entirely within the query's range. + +`CONTAINS`:: + + Matches documents whose range field entirely contains the query's range. + +`INTERSECTS`:: + + Matches documents whose range field intersects the query's range. + This is the default value when querying range fields. + +For examples, see <> mapping type. diff --git a/docs/reference/query-dsl/simple-query-string-query.asciidoc b/docs/reference/query-dsl/simple-query-string-query.asciidoc index 99fbc131c1be3..4ec97ff49bd71 100644 --- a/docs/reference/query-dsl/simple-query-string-query.asciidoc +++ b/docs/reference/query-dsl/simple-query-string-query.asciidoc @@ -31,8 +31,11 @@ The `simple_query_string` top level parameters include: |`fields` |The fields to perform the parsed query against. Defaults to the `index.query.default_field` index settings, which in turn defaults to `*`. `*` extracts all fields in the mapping that are eligible to term queries and filters -the metadata fields. There is a limit of no more than 1024 fields being queried -at once. +the metadata fields. + +WARNING: There is a limit on the number of fields that can be queried +at once. It is defined by the `indices.query.bool.max_clause_count` <> +which defaults to 1024. |`default_operator` |The default operator used if no explicit operator is specified. For example, with a default operator of `OR`, the query diff --git a/docs/reference/rest-api/timeoutparms.asciidoc b/docs/reference/rest-api/timeoutparms.asciidoc new file mode 100644 index 0000000000000..fc8f5208219b2 --- /dev/null +++ b/docs/reference/rest-api/timeoutparms.asciidoc @@ -0,0 +1,11 @@ +`timeout`:: + (time units) Specifies the period of time to wait for a response. If no + response is received before the timeout expires, the request fails and + returns an error. Defaults to `30s`. For more information about + time units, see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for a connection to the + master node. If no response is received before the timeout expires, the request + fails and returns an error. Defaults to `30s`. For more information about + time units, see <>. diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 2da11c14b5804..a506f6219fc19 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -79,6 +79,10 @@ This highlighter can be used on fields with `term_vector` set to for things like phrase matches being sorted above term matches when highlighting a Boosting Query that boosts phrase matches over term matches +[WARNING] +The `fvh` highlighter does not support span queries.
diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index 2da11c14b5804..a506f6219fc19 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -79,6 +79,10 @@ This highlighter can be used on fields with `term_vector` set to for things like phrase matches being sorted above term matches when highlighting a Boosting Query that boosts phrase matches over term matches +[WARNING] +The `fvh` highlighter does not support span queries. If you need support for +span queries, try an alternative highlighter, such as the `unified` highlighter. + [[offsets-strategy]] ==== Offsets Strategy To create meaningful search snippets from the terms being queried,
diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index 69045dca0a2db..011a66d62b3b8 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -58,7 +58,6 @@ event types such as `authentication_failed`. The default value is `false`. -- IMPORTANT: No filtering is performed when auditing, so sensitive data may be audited in plain text when including the request body in audit events. - -- [[node-audit-settings]] @@ -86,6 +85,35 @@ changes the setting in the config file, the node id will persist across cluster restarts and the administrator cannot change it. The default value is `true`. +[[audit-event-ignore-policies]] +==== Audit Logfile Event Ignore Policies + +These settings affect the {stack-ov}/audit-log-output.html#audit-log-ignore-policy[ignore policies] +that enable fine-grained control over which audit events are printed to the log file. +All of the settings with the same policy name combine to form a single policy. +If an event matches all of the conditions for a specific policy, it is ignored +and not printed. + +`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.users`:: +A list of user names or wildcards. The specified policy will +not print audit events for users matching these values. + +`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.realms`:: +A list of authentication realm names or wildcards. The specified policy will +not print audit events for users in these realms. + +`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.roles`:: +A list of role names or wildcards. The specified policy will +not print audit events for users that have these roles. If the user has several +roles, some of which are *not* covered by the policy, the policy will +*not* cover this event. + +`xpack.security.audit.logfile.events.ignore_filters.<policy_name>.indices`:: +A list of index names or wildcards. The specified policy will +not print audit events when all the indices in the event match +these values. If the event concerns several indices, some of which are +*not* covered by the policy, the policy will *not* cover this event.
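A hedged sketch of how the keys of one such policy combine, again using the `Settings` builder; the policy name `system` and every value below are invented for illustration, and real configuration lives in `elasticsearch.yml`:

```java
import org.elasticsearch.common.settings.Settings;

public class IgnorePolicyExample {
    public static void main(String[] args) {
        // All keys sharing the policy name "system" form a single policy.
        Settings settings = Settings.builder()
                .putList("xpack.security.audit.logfile.events.ignore_filters.system.users",
                        "kibana", "beats_system")
                .putList("xpack.security.audit.logfile.events.ignore_filters.system.indices",
                        ".monitoring-*")
                .build();
        // An event is ignored only if it matches *all* conditions of the
        // policy, per the documentation above.
        System.out.println(settings.getAsList(
                "xpack.security.audit.logfile.events.ignore_filters.system.users"));
    }
}
```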
+ [[index-audit-settings]] ==== Audit Log Indexing Configuration Settings
diff --git a/docs/reference/setup/install/check-running.asciidoc b/docs/reference/setup/install/check-running.asciidoc index 7b95a10158d2f..663442a456c65 100644 --- a/docs/reference/setup/install/check-running.asciidoc +++ b/docs/reference/setup/install/check-running.asciidoc @@ -24,6 +24,7 @@ which should give you a response something like this: "build_hash" : "f27399d", "build_date" : "2016-03-30T09:51:41.449Z", "build_snapshot" : false, + "qualified" : "{version_qualified}", "lucene_version" : "{lucene_version}", "minimum_wire_compatibility_version" : "1.2.3", "minimum_index_compatibility_version" : "1.2.3"
diff --git a/docs/reference/setup/sysconfig.asciidoc b/docs/reference/setup/sysconfig.asciidoc index 971d62a9d4d4b..8b548202c80a5 100644 --- a/docs/reference/setup/sysconfig.asciidoc +++ b/docs/reference/setup/sysconfig.asciidoc @@ -13,6 +13,7 @@ The following settings *must* be considered before going to production: * <> * <> * <> +* <<executable-jna-tmpdir>> [[dev-vs-prod]] [float] @@ -40,3 +41,5 @@ include::sysconfig/virtual-memory.asciidoc[] include::sysconfig/threads.asciidoc[] include::sysconfig/dns-cache.asciidoc[] + +include::sysconfig/executable-jna-tmpdir.asciidoc[]
diff --git a/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc b/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc new file mode 100644 index 0000000000000..0ede64d57b701 --- /dev/null +++ b/docs/reference/setup/sysconfig/executable-jna-tmpdir.asciidoc @@ -0,0 +1,24 @@ +[[executable-jna-tmpdir]] +=== JNA temporary directory not mounted with `noexec` + +[NOTE] +This is only relevant for Linux. + +Elasticsearch uses the Java Native Access (JNA) library for executing some +platform-dependent native code. On Linux, the native code backing this library +is extracted at runtime from the JNA archive. By default, this code is extracted +to the Elasticsearch temporary directory which defaults to a sub-directory of +`/tmp`. Alternatively, this location can be controlled with the JVM flag +`-Djna.tmpdir=<path>`. As the native library is mapped into the JVM virtual +address space as executable, the underlying mount point of the location that +this code is extracted to must *not* be mounted with `noexec` as this prevents +the JVM process from being able to map this code as executable. On some hardened +Linux installations this is a default mount option for `/tmp`. One indication +that the underlying mount is mounted with `noexec` is that at startup JNA will +fail to load with a `java.lang.UnsatisfiedLinkError` exception with a message +along the lines of `failed to map segment from shared object`. Note that the +exception message can differ amongst JVM versions. Additionally, the components +of Elasticsearch that rely on execution of native code via JNA will fail with +messages indicating that it is `because JNA is not available`. If you are seeing +such error messages, you must remount the temporary directory used for JNA +without the `noexec` option.
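A small sketch for checking where JNA will extract native code on a given JVM: `jna.tmpdir` is the flag described in the new file above, and `java.io.tmpdir` is the standard JVM fallback; the `/proc/mounts` scan is Linux-only, matching the note at the top of that file:

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

public class JnaTmpdirCheck {
    public static void main(String[] args) throws IOException {
        // JNA extracts its native stub to jna.tmpdir if set, otherwise to the
        // JVM's java.io.tmpdir. Either way, the backing mount must allow exec.
        String tmpdir = System.getProperty("jna.tmpdir",
                System.getProperty("java.io.tmpdir"));
        System.out.println("JNA will extract native code under: " + tmpdir);

        // Linux only: list mounts carrying the noexec flag so the directory
        // above can be checked against them by hand.
        Files.lines(Paths.get("/proc/mounts"))
                .filter(line -> line.contains("noexec"))
                .forEach(System.out::println);
    }
}
```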
diff --git a/docs/reference/setup/sysconfig/swap.asciidoc b/docs/reference/setup/sysconfig/swap.asciidoc index 43ec9f44eb50f..c936cdf964370 100644 --- a/docs/reference/setup/sysconfig/swap.asciidoc +++ b/docs/reference/setup/sysconfig/swap.asciidoc @@ -95,13 +95,14 @@ Systems using `systemd`:: Set `LimitMEMLOCK` to `infinity` in the <<systemd,systemd configuration>>. -Another possible reason why `mlockall` can fail is that the temporary directory -(usually `/tmp`) is mounted with the `noexec` option. This can be solved by -specifying a new temp directory using the `ES_JAVA_OPTS` environment variable: +Another possible reason why `mlockall` can fail is that +<<executable-jna-tmpdir,the JNA temporary directory is mounted with `noexec`>>. This can be solved by specifying +a new temporary directory for JNA using the `ES_JAVA_OPTS` environment variable: [source,sh] -------------- -export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djava.io.tmpdir=/path/to/temp/dir" +export ES_JAVA_OPTS="$ES_JAVA_OPTS -Djna.tmpdir=<path>" ./bin/elasticsearch --------------
diff --git a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java index 493d809f9dc33..46d19d2a814fe 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java +++ b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java @@ -277,5 +277,4 @@ public static void fsync(final Path fileToSync, final boolean isDir) throws IOException { throw ioe; } } - }
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index aaca4f9b1860f..75e0087831a62 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -44,7 +44,6 @@ import org.apache.lucene.analysis.core.DecimalDigitFilter; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.LetterTokenizer; -import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.apache.lucene.analysis.core.UpperCaseFilter; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.cz.CzechAnalyzer; @@ -308,7 +307,8 @@ public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() { tokenizers.put("char_group", CharGroupTokenizerFactory::new); tokenizers.put("classic", ClassicTokenizerFactory::new); tokenizers.put("letter", LetterTokenizerFactory::new); - tokenizers.put("lowercase", LowerCaseTokenizerFactory::new); + // TODO deprecate and remove in API + tokenizers.put("lowercase", XLowerCaseTokenizerFactory::new); tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new); tokenizers.put("pattern", PatternTokenizerFactory::new); @@ -503,7 +503,8 @@ public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() { () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null)); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null)); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null)); - tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() { + // TODO deprecate and remove in API + tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() { @Override public String name() { return "lowercase";
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java index 69acb411d105f..ae20a5fbf5a86 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java
+++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java @@ -33,7 +33,7 @@ public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTo LegacyDelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException( "[delimited_payload_filter] is not supported for new indices, use [delimited_payload] instead"); } diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java index 8d99ec1d1a15d..72649239a30d7 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenFilterFactory.java @@ -43,7 +43,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException( "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + maxAllowedNgramDiff + "] but was [" + ngramDiff + "]. This limit can be set by changing the [" diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java index b00797428b79a..e811d0fbc4bda 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/NGramTokenizerFactory.java @@ -91,7 +91,7 @@ static CharMatcher parseTokenChars(List characterClasses) { this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException( "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + maxAllowedNgramDiff + "] but was [" + ngramDiff + "]. This limit can be set by changing the [" diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java new file mode 100644 index 0000000000000..3f11c52858aa4 --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizer.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.CharacterUtils; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; +import org.apache.lucene.analysis.util.CharTokenizer; + +import java.io.IOException; + +@Deprecated +class XLowerCaseTokenizer extends Tokenizer { + + private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0; + + private static final int IO_BUFFER_SIZE = 4096; + + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class); + + private final CharacterUtils.CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE); + + @Override + public final boolean incrementToken() throws IOException { + clearAttributes(); + int length = 0; + int start = -1; // this variable is always initialized + int end = -1; + char[] buffer = termAtt.buffer(); + while (true) { + if (bufferIndex >= dataLen) { + offset += dataLen; + CharacterUtils.fill(ioBuffer, input); // read supplementary char aware with CharacterUtils + if (ioBuffer.getLength() == 0) { + dataLen = 0; // so next offset += dataLen won't decrement offset + if (length > 0) { + break; + } else { + finalOffset = correctOffset(offset); + return false; + } + } + dataLen = ioBuffer.getLength(); + bufferIndex = 0; + } + // use CharacterUtils here to support < 3.1 UTF-16 code unit behavior if the char based methods are gone + final int c = Character.codePointAt(ioBuffer.getBuffer(), bufferIndex, ioBuffer.getLength()); + final int charCount = Character.charCount(c); + bufferIndex += charCount; + + if (Character.isLetter(c)) { // if it's a token char + if (length == 0) { // start of token + assert start == -1; + start = offset + bufferIndex - charCount; + end = start; + } else if (length >= buffer.length-1) { // check if a supplementary could run out of bounds + buffer = termAtt.resizeBuffer(2+length); // make sure a supplementary fits in the buffer + } + end += charCount; + length += Character.toChars(Character.toLowerCase(c), buffer, length); // buffer it, normalized + int maxTokenLen = CharTokenizer.DEFAULT_MAX_WORD_LEN; + if (length >= maxTokenLen) { // buffer overflow! 
make sure to check for >= surrogate pair could break == test + break; + } + } else if (length > 0) { // at non-Letter w/ chars + break; // return 'em + } + } + + termAtt.setLength(length); + assert start != -1; + offsetAtt.setOffset(correctOffset(start), finalOffset = correctOffset(end)); + return true; + + } + + @Override + public final void end() throws IOException { + super.end(); + // set final offset + offsetAtt.setOffset(finalOffset, finalOffset); + } + + @Override + public void reset() throws IOException { + super.reset(); + bufferIndex = 0; + offset = 0; + dataLen = 0; + finalOffset = 0; + ioBuffer.reset(); // make sure to reset the IO buffer!! + } + +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java similarity index 71% rename from modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java index 8c913a33cfe4c..4cd5b07fe484a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LowerCaseTokenizerFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/XLowerCaseTokenizerFactory.java @@ -20,26 +20,21 @@ package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.LowerCaseTokenizer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; -import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent { +@Deprecated +// NORELEASE we should prevent the usage on indices created after 7.0 in order to be able to remove in 8 +public class XLowerCaseTokenizerFactory extends AbstractTokenizerFactory { - LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + public XLowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { super(indexSettings, settings); } @Override public Tokenizer create() { - return new LowerCaseTokenizer(); - } - - @Override - public Object getMultiTermComponent() { - return this; + return new XLowerCaseTokenizer(); } } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index f9fca66cc54a1..99e882c622085 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -48,7 +48,7 @@ protected Map> getTokenizers() { tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class); tokenizers.put("classic", ClassicTokenizerFactory.class); tokenizers.put("letter", LetterTokenizerFactory.class); - tokenizers.put("lowercase", LowerCaseTokenizerFactory.class); + // tokenizers.put("lowercase", XLowerCaseTokenizerFactory.class); tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class); tokenizers.put("pattern", 
PatternTokenizerFactory.class); tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class); @@ -223,7 +223,7 @@ protected Map> getPreConfiguredTokenFilters() { protected Map> getPreConfiguredTokenizers() { Map> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers()); tokenizers.put("keyword", null); - tokenizers.put("lowercase", null); + tokenizers.put("lowercase", Void.class); tokenizers.put("classic", null); tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class); tokenizers.put("path_hierarchy", null); diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..b536c887eab0c --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +8db13c6e146c851614c9f862f1eac67431f9b509 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 4904c89e62f89..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cc072b68aac06a2fb9569ab7adce05302f130948 \ No newline at end of file diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 74f3bc743aeb2..70e36ed85a496 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -154,7 +154,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = Item.readItem(in); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { tookInMillis = in.readVLong(); } } @@ -166,7 +166,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 40062f2cb75ad..cd50de3025ae8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -33,7 +33,6 @@ import org.objectweb.asm.commons.GeneratorAdapter; import java.lang.invoke.MethodType; -import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.security.AccessControlContext; import java.security.AccessController; @@ -333,42 +332,6 @@ private void writeNeedsMethods(Class clazz, ClassWriter writer, MainMethodRes } } - Constructor compile(Compiler compiler, String scriptName, String source, Map params) { - final CompilerSettings compilerSettings = buildCompilerSettings(params); - - // Check we ourselves are not being called by unprivileged code. 
- SpecialPermission.check(); - - // Create our loader (which loads compiled code with no permissions). - final Loader loader = AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Loader run() { - return compiler.createLoader(getClass().getClassLoader()); - } - }); - - try { - // Drop all permissions to actually compile the code itself. - return AccessController.doPrivileged(new PrivilegedAction>() { - @Override - public Constructor run() { - String name = scriptName == null ? source : scriptName; - Constructor constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings); - - try { - return constructor; - } catch (Exception exception) { // Catch everything to let the user know this is something caused internally. - throw new IllegalStateException( - "An internal error occurred attempting to define the script [" + name + "].", exception); - } - } - }, COMPILATION_CONTEXT); - // Note that it is safe to catch any of the following errors since Painless is stateless. - } catch (OutOfMemoryError | StackOverflowError | VerifyError | Exception e) { - throw convertToScriptException(source, e); - } - } - void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, String scriptName, String source, Map params) { final CompilerSettings compilerSettings = buildCompilerSettings(params); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java index 1adc953deb52e..ae96c8b3b7944 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/BaseClassTests.java @@ -19,16 +19,15 @@ package org.elasticsearch.painless; -import org.elasticsearch.painless.lookup.PainlessLookup; -import org.elasticsearch.painless.lookup.PainlessLookupBuilder; import org.elasticsearch.painless.spi.Whitelist; +import org.elasticsearch.script.ScriptContext; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.startsWith; @@ -38,10 +37,40 @@ */ public class BaseClassTests extends ScriptTestCase { - private final PainlessLookup painlessLookup = PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS); + protected Map, List> scriptContexts() { + Map, List> contexts = new HashMap<>(); + contexts.put(Gets.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(NoArgs.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(OneArg.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(PrimitiveArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(DefArrayArg.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ManyArgs.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(VarArgs.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(DefaultMethods.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ReturnsVoid.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveBoolean.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveInt.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveFloat.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(ReturnsPrimitiveDouble.CONTEXT, 
Whitelist.BASE_WHITELISTS); + contexts.put(NoArgsConstant.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(WrongArgsConstant.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(WrongLengthOfArgConstant.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(UnknownArgType.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(UnknownReturnType.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(UnknownArgTypeInArray.CONTEXT, Whitelist.BASE_WHITELISTS); + contexts.put(TwoExecuteMethods.CONTEXT, Whitelist.BASE_WHITELISTS); + return contexts; + } public abstract static class Gets { + public interface Factory { + Gets newInstance(String testString, int testInt, Map params); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("gets", Factory.class); + private final String testString; private final int testInt; private final Map testMap; @@ -58,115 +87,137 @@ public Gets(String testString, int testInt, Map testMap) { public String getTestString() { return testString; } - public int getTestInt() { return Math.abs(testInt); } - public Map getTestMap() { return testMap == null ? new HashMap<>() : testMap; } } - public void testGets() throws Exception { - Compiler compiler = new Compiler(Gets.class, null, null, painlessLookup); Map map = new HashMap<>(); map.put("s", 1); - assertEquals(1, ((Gets)scriptEngine.compile(compiler, null, "testInt", emptyMap()).newInstance("s", -1, null)).execute()); + assertEquals(1, scriptEngine.compile("testGets0", "testInt", Gets.CONTEXT, emptyMap()).newInstance("s", -1, null).execute()); assertEquals(Collections.emptyMap(), - ((Gets)scriptEngine.compile(compiler, null, "testMap", emptyMap()).newInstance("s", -1, null)).execute()); - assertEquals(Collections.singletonMap("1", "1"), ((Gets)scriptEngine.compile( - compiler, null, "testMap", emptyMap()).newInstance("s", -1, Collections.singletonMap("1", "1"))).execute()); - assertEquals("s", ((Gets)scriptEngine.compile(compiler, null, "testString", emptyMap()).newInstance("s", -1, null)).execute()); - assertEquals(map, ((Gets)scriptEngine.compile( - compiler, null, "testMap.put(testString, testInt); testMap", emptyMap()).newInstance("s", -1, null)).execute()); + scriptEngine.compile("testGets1", "testMap", Gets.CONTEXT, emptyMap()).newInstance("s", -1, null).execute()); + assertEquals(Collections.singletonMap("1", "1"), + scriptEngine.compile("testGets2", "testMap", Gets.CONTEXT, emptyMap()) + .newInstance("s", -1, Collections.singletonMap("1", "1")).execute()); + assertEquals("s", scriptEngine.compile("testGets3", "testString", Gets.CONTEXT, emptyMap()).newInstance("s", -1, null).execute()); + assertEquals(map, + scriptEngine.compile("testGets4", "testMap.put(testString, testInt); testMap", Gets.CONTEXT, emptyMap()) + .newInstance("s", -1, null).execute()); } public abstract static class NoArgs { + public interface Factory { + NoArgs newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("noargs", Factory.class); + public static final String[] PARAMETERS = new String[] {}; public abstract Object execute(); } public void testNoArgs() throws Exception { - Compiler compiler = new Compiler(NoArgs.class, null, null, painlessLookup); - assertEquals(1, ((NoArgs)scriptEngine.compile(compiler, null, "1", emptyMap()).newInstance()).execute()); - assertEquals("foo", ((NoArgs)scriptEngine.compile(compiler, null, "'foo'", emptyMap()).newInstance()).execute()); + assertEquals(1, scriptEngine.compile("testNoArgs0", "1", NoArgs.CONTEXT, emptyMap()).newInstance().execute()); + 
assertEquals("foo", scriptEngine.compile("testNoArgs1", "'foo'", NoArgs.CONTEXT, emptyMap()).newInstance().execute()); Exception e = expectScriptThrows(IllegalArgumentException.class, () -> - scriptEngine.compile(compiler, null, "doc", emptyMap())); + scriptEngine.compile("testNoArgs2", "doc", NoArgs.CONTEXT, emptyMap())); assertEquals("Variable [doc] is not defined.", e.getMessage()); - // _score was once embedded into painless by deep magic e = expectScriptThrows(IllegalArgumentException.class, () -> - scriptEngine.compile(compiler, null, "_score", emptyMap())); + scriptEngine.compile("testNoArgs3", "_score", NoArgs.CONTEXT, emptyMap())); assertEquals("Variable [_score] is not defined.", e.getMessage()); String debug = Debugger.toString(NoArgs.class, "int i = 0", new CompilerSettings()); - /* Elasticsearch requires that scripts that return nothing return null. We hack that together by returning null from scripts that - * return Object if they don't return anything. */ assertThat(debug, containsString("ACONST_NULL")); assertThat(debug, containsString("ARETURN")); } public abstract static class OneArg { + public interface Factory { + OneArg newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("onearg", Factory.class); + public static final String[] PARAMETERS = new String[] {"arg"}; public abstract Object execute(Object arg); } public void testOneArg() throws Exception { - Compiler compiler = new Compiler(OneArg.class, null, null, painlessLookup); Object rando = randomInt(); - assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap()).newInstance()).execute(rando)); + assertEquals(rando, scriptEngine.compile("testOneArg0", "arg", OneArg.CONTEXT, emptyMap()).newInstance().execute(rando)); rando = randomAlphaOfLength(5); - assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap()).newInstance()).execute(rando)); - - Compiler noargs = new Compiler(NoArgs.class, null, null, painlessLookup); - Exception e = expectScriptThrows(IllegalArgumentException.class, () -> - scriptEngine.compile(noargs, null, "doc", emptyMap())); - assertEquals("Variable [doc] is not defined.", e.getMessage()); - // _score was once embedded into painless by deep magic - e = expectScriptThrows(IllegalArgumentException.class, () -> - scriptEngine.compile(noargs, null, "_score", emptyMap())); - assertEquals("Variable [_score] is not defined.", e.getMessage()); + assertEquals(rando, scriptEngine.compile("testOneArg1", "arg", OneArg.CONTEXT, emptyMap()).newInstance().execute(rando)); } public abstract static class ArrayArg { + public interface Factory { + ArrayArg newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("arrayarg", Factory.class); + public static final String[] PARAMETERS = new String[] {"arg"}; public abstract Object execute(String[] arg); } public void testArrayArg() throws Exception { - Compiler compiler = new Compiler(ArrayArg.class, null, null, painlessLookup); String rando = randomAlphaOfLength(5); assertEquals(rando, - ((ArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap()).newInstance()).execute(new String[] {rando, "foo"})); + scriptEngine.compile("testArrayArg0", "arg[0]", ArrayArg.CONTEXT, emptyMap()) + .newInstance().execute(new String[] {rando, "foo"})); } public abstract static class PrimitiveArrayArg { + public interface Factory { + PrimitiveArrayArg newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("primitivearrayarg", 
Factory.class); + public static final String[] PARAMETERS = new String[] {"arg"}; public abstract Object execute(int[] arg); } public void testPrimitiveArrayArg() throws Exception { - Compiler compiler = new Compiler(PrimitiveArrayArg.class, null, null, painlessLookup); int rando = randomInt(); - assertEquals(rando, ((PrimitiveArrayArg)scriptEngine.compile( - compiler, null, "arg[0]", emptyMap()).newInstance()).execute(new int[] {rando, 10})); + assertEquals(rando, + scriptEngine.compile("PrimitiveArrayArg0", "arg[0]", PrimitiveArrayArg.CONTEXT, emptyMap()) + .newInstance().execute(new int[] {rando, 10})); } public abstract static class DefArrayArg { + public interface Factory { + DefArrayArg newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("defarrayarg", Factory.class); + public static final String[] PARAMETERS = new String[] {"arg"}; public abstract Object execute(Object[] arg); } public void testDefArrayArg()throws Exception { - Compiler compiler = new Compiler(DefArrayArg.class, null, null, painlessLookup); Object rando = randomInt(); assertEquals(rando, - ((DefArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap()).newInstance()).execute(new Object[] {rando, 10})); + scriptEngine.compile("testDefArray0", "arg[0]", DefArrayArg.CONTEXT, emptyMap()) + .newInstance().execute(new Object[] {rando, 10})); rando = randomAlphaOfLength(5); assertEquals(rando, - ((DefArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap()).newInstance()).execute(new Object[] {rando, 10})); - assertEquals(5, ((DefArrayArg)scriptEngine.compile( - compiler, null, "arg[0].length()", emptyMap()).newInstance()).execute(new Object[] {rando, 10})); + scriptEngine.compile("testDefArray1", "arg[0]", DefArrayArg.CONTEXT, emptyMap()) + .newInstance().execute(new Object[] {rando, 10})); + assertEquals(5, scriptEngine.compile( + "testDefArray2", "arg[0].length()", DefArrayArg.CONTEXT, emptyMap()) + .newInstance().execute(new Object[] {rando, 10})); } public abstract static class ManyArgs { + public interface Factory { + ManyArgs newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("manyargs", Factory.class); + public static final String[] PARAMETERS = new String[] {"a", "b", "c", "d"}; public abstract Object execute(int a, int b, int c, int d); public abstract boolean needsA(); @@ -175,41 +226,53 @@ public abstract static class ManyArgs { public abstract boolean needsD(); } public void testManyArgs() throws Exception { - Compiler compiler = new Compiler(ManyArgs.class, null, null, painlessLookup); int rando = randomInt(); - assertEquals(rando, ((ManyArgs)scriptEngine.compile(compiler, null, "a", emptyMap()).newInstance()).execute(rando, 0, 0, 0)); - assertEquals(10, ((ManyArgs)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap()).newInstance()).execute(1, 2, 3, 4)); + assertEquals(rando, + scriptEngine.compile("testManyArgs0", "a", ManyArgs.CONTEXT, emptyMap()).newInstance().execute(rando, 0, 0, 0)); + assertEquals(10, + scriptEngine.compile("testManyArgs1", "a + b + c + d", ManyArgs.CONTEXT, emptyMap()).newInstance().execute(1, 2, 3, 4)); // While we're here we can verify that painless correctly finds used variables - ManyArgs script = (ManyArgs)scriptEngine.compile(compiler, null, "a", emptyMap()).newInstance(); + ManyArgs script = scriptEngine.compile("testManyArgs2", "a", ManyArgs.CONTEXT, emptyMap()).newInstance(); assertTrue(script.needsA()); assertFalse(script.needsB()); assertFalse(script.needsC()); 
assertFalse(script.needsD()); - script = (ManyArgs)scriptEngine.compile(compiler, null, "a + b + c", emptyMap()).newInstance(); + script = scriptEngine.compile("testManyArgs3", "a + b + c", ManyArgs.CONTEXT, emptyMap()).newInstance(); assertTrue(script.needsA()); assertTrue(script.needsB()); assertTrue(script.needsC()); assertFalse(script.needsD()); - script = (ManyArgs)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap()).newInstance(); + script = scriptEngine.compile("testManyArgs4", "a + b + c + d", ManyArgs.CONTEXT, emptyMap()).newInstance(); assertTrue(script.needsA()); assertTrue(script.needsB()); assertTrue(script.needsC()); assertTrue(script.needsD()); } - public abstract static class VarargTest { + public abstract static class VarArgs { + public interface Factory { + VarArgs newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("varargs", Factory.class); + public static final String[] PARAMETERS = new String[] {"arg"}; public abstract Object execute(String... arg); } - public void testVararg() throws Exception { - Compiler compiler = new Compiler(VarargTest.class, null, null, painlessLookup); + public void testVarArgs() throws Exception { assertEquals("foo bar baz", - ((VarargTest)scriptEngine.compile(compiler, null, "String.join(' ', Arrays.asList(arg))", emptyMap()).newInstance()) - .execute("foo", "bar", "baz")); + scriptEngine.compile("testVarArgs0", "String.join(' ', Arrays.asList(arg))", VarArgs.CONTEXT, emptyMap()) + .newInstance().execute("foo", "bar", "baz")); } public abstract static class DefaultMethods { + public interface Factory { + DefaultMethods newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("defaultmethods", Factory.class); + public static final String[] PARAMETERS = new String[] {"a", "b", "c", "d"}; public abstract Object execute(int a, int b, int c, int d); public Object executeWithOne() { @@ -220,28 +283,38 @@ public Object executeWithASingleOne(int a, int b, int c) { } } public void testDefaultMethods() throws Exception { - Compiler compiler = new Compiler(DefaultMethods.class, null, null, painlessLookup); int rando = randomInt(); - assertEquals(rando, ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap()).newInstance()).execute(rando, 0, 0, 0)); assertEquals(rando, - ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap()).newInstance()).executeWithASingleOne(rando, 0, 0)); + scriptEngine.compile("testDefaultMethods0", "a", DefaultMethods.CONTEXT, emptyMap()).newInstance().execute(rando, 0, 0, 0)); + assertEquals(rando, + scriptEngine.compile("testDefaultMethods1", "a", DefaultMethods.CONTEXT, emptyMap()) + .newInstance().executeWithASingleOne(rando, 0, 0)); assertEquals(10, - ((DefaultMethods)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap()).newInstance()).execute(1, 2, 3, 4)); - assertEquals(4, ((DefaultMethods)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap()).newInstance()).executeWithOne()); - assertEquals(7, ((DefaultMethods)scriptEngine.compile( - compiler, null, "a + b + c + d", emptyMap()).newInstance()).executeWithASingleOne(1, 2, 3)); + scriptEngine.compile("testDefaultMethods2", "a + b + c + d", DefaultMethods.CONTEXT, emptyMap()) + .newInstance().execute(1, 2, 3, 4)); + assertEquals(4, + scriptEngine.compile("testDefaultMethods3", "a + b + c + d", DefaultMethods.CONTEXT, emptyMap()) + .newInstance().executeWithOne()); + assertEquals(7, + scriptEngine.compile("testDefaultMethods4", "a + b + c + d", 
DefaultMethods.CONTEXT, emptyMap()) + .newInstance().executeWithASingleOne(1, 2, 3)); } public abstract static class ReturnsVoid { + public interface Factory { + ReturnsVoid newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("returnsvoid", Factory.class); + public static final String[] PARAMETERS = new String[] {"map"}; public abstract void execute(Map map); } public void testReturnsVoid() throws Exception { - Compiler compiler = new Compiler(ReturnsVoid.class, null, null, painlessLookup); Map map = new HashMap<>(); - ((ReturnsVoid)scriptEngine.compile(compiler, null, "map.a = 'foo'", emptyMap()).newInstance()).execute(map); - assertEquals(singletonMap("a", "foo"), map); - ((ReturnsVoid)scriptEngine.compile(compiler, null, "map.remove('a')", emptyMap()).newInstance()).execute(map); + scriptEngine.compile("testReturnsVoid0", "map.a = 'foo'", ReturnsVoid.CONTEXT, emptyMap()).newInstance().execute(map); + assertEquals(Collections.singletonMap("a", "foo"), map); + scriptEngine.compile("testReturnsVoid1", "map.remove('a')", ReturnsVoid.CONTEXT, emptyMap()).newInstance().execute(map); assertEquals(emptyMap(), map); String debug = Debugger.toString(ReturnsVoid.class, "int i = 0", new CompilerSettings()); @@ -252,26 +325,38 @@ public void testReturnsVoid() throws Exception { } public abstract static class ReturnsPrimitiveBoolean { + public interface Factory { + ReturnsPrimitiveBoolean newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("returnsprimitiveboolean", Factory.class); + public static final String[] PARAMETERS = new String[] {}; public abstract boolean execute(); } public void testReturnsPrimitiveBoolean() throws Exception { - Compiler compiler = new Compiler(ReturnsPrimitiveBoolean.class, null, null, painlessLookup); - - assertEquals(true, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "true", emptyMap()).newInstance()).execute()); - assertEquals(false, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "false", emptyMap()).newInstance()).execute()); - assertEquals(true, - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "Boolean.TRUE", emptyMap()).newInstance()).execute()); - assertEquals(false, - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "Boolean.FALSE", emptyMap()).newInstance()).execute()); - - assertEquals(true, - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "def i = true; i", emptyMap()).newInstance()).execute()); - assertEquals(true, ((ReturnsPrimitiveBoolean)scriptEngine.compile( - compiler, null, "def i = Boolean.TRUE; i", emptyMap()).newInstance()).execute()); - - assertEquals(true, - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "true || false", emptyMap()).newInstance()).execute()); + assertTrue( + scriptEngine.compile("testReturnsPrimitiveBoolean0", "true", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + assertFalse( + scriptEngine.compile("testReturnsPrimitiveBoolean1", "false", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + assertTrue( + scriptEngine.compile("testReturnsPrimitiveBoolean2", "Boolean.TRUE", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + assertFalse( + scriptEngine.compile("testReturnsPrimitiveBoolean3", "Boolean.FALSE", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + + assertTrue( + scriptEngine.compile("testReturnsPrimitiveBoolean4", "def i = true; i", 
ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + assertTrue( + scriptEngine.compile("testReturnsPrimitiveBoolean5", "def i = Boolean.TRUE; i", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); + assertTrue( + scriptEngine.compile("testReturnsPrimitiveBoolean6", "true || false", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); String debug = Debugger.toString(ReturnsPrimitiveBoolean.class, "false", new CompilerSettings()); assertThat(debug, containsString("ICONST_0")); @@ -279,44 +364,67 @@ public void testReturnsPrimitiveBoolean() throws Exception { assertThat(debug, containsString("IRETURN")); Exception e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "1L", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean7", "1L",ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); assertEquals("Cannot cast from [long] to [boolean].", e.getMessage()); e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "1.1f", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean8", "1.1f", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); assertEquals("Cannot cast from [float] to [boolean].", e.getMessage()); e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "1.1d", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean9", "1.1d", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); assertEquals("Cannot cast from [double] to [boolean].", e.getMessage()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "def i = 1L; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean10", "def i = 1L; i", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "def i = 1.1f; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean11", "def i = 1.1f; i", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "def i = 1.1d; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveBoolean12", "def i = 1.1d; i", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); - assertEquals(false, - ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "int i = 0", emptyMap()).newInstance()).execute()); + assertFalse( + scriptEngine.compile("testReturnsPrimitiveBoolean13", "int i = 0", ReturnsPrimitiveBoolean.CONTEXT, emptyMap()) + .newInstance().execute()); } public abstract static class ReturnsPrimitiveInt { + public interface Factory { + ReturnsPrimitiveInt newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("returnsprimitiveint", Factory.class); + public static final String[] PARAMETERS = new String[] {}; public abstract int execute(); } public void testReturnsPrimitiveInt() throws Exception { - Compiler compiler = new Compiler(ReturnsPrimitiveInt.class, null, null, painlessLookup); - - 
assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1", emptyMap()).newInstance()).execute()); - assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "(int) 1L", emptyMap()).newInstance()).execute()); - assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "(int) 1.1d", emptyMap()).newInstance()).execute()); - assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "(int) 1.1f", emptyMap()).newInstance()).execute()); + assertEquals(1, + scriptEngine.compile("testReturnsPrimitiveInt0", "1", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); + assertEquals(1, + scriptEngine.compile("testReturnsPrimitiveInt1", "(int) 1L", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); + assertEquals(1, scriptEngine.compile("testReturnsPrimitiveInt2", "(int) 1.1d", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); assertEquals(1, - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "Integer.valueOf(1)", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt3", "(int) 1.1f", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); + assertEquals(1, + scriptEngine.compile("testReturnsPrimitiveInt4", "Integer.valueOf(1)", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); - assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "def i = 1; i", emptyMap()).newInstance()).execute()); - assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile( - compiler, null, "def i = Integer.valueOf(1); i", emptyMap()).newInstance()).execute()); + assertEquals(1, + scriptEngine.compile("testReturnsPrimitiveInt5", "def i = 1; i", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); + assertEquals(1, + scriptEngine.compile("testReturnsPrimitiveInt6", "def i = Integer.valueOf(1); i", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); - assertEquals(2, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1 + 1", emptyMap()).newInstance()).execute()); + assertEquals(2, + scriptEngine.compile("testReturnsPrimitiveInt7", "1 + 1", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()); String debug = Debugger.toString(ReturnsPrimitiveInt.class, "1", new CompilerSettings()); assertThat(debug, containsString("ICONST_1")); @@ -324,49 +432,66 @@ public void testReturnsPrimitiveInt() throws Exception { assertThat(debug, containsString("IRETURN")); Exception e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1L", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt8", "1L", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()); assertEquals("Cannot cast from [long] to [int].", e.getMessage()); e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1.1f", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt9", "1.1f", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()); assertEquals("Cannot cast from [float] to [int].", e.getMessage()); e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1.1d", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt10", "1.1d", ReturnsPrimitiveInt.CONTEXT, emptyMap()).newInstance().execute()); 
assertEquals("Cannot cast from [double] to [int].", e.getMessage()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "def i = 1L; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt11", "def i = 1L; i", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "def i = 1.1f; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt12", "def i = 1.1f; i", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "def i = 1.1d; i", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveInt13", "def i = 1.1d; i", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); - assertEquals(0, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "int i = 0", emptyMap()).newInstance()).execute()); + assertEquals(0, scriptEngine.compile("testReturnsPrimitiveInt14", "int i = 0", ReturnsPrimitiveInt.CONTEXT, emptyMap()) + .newInstance().execute()); } public abstract static class ReturnsPrimitiveFloat { + public interface Factory { + ReturnsPrimitiveFloat newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("returnsprimitivefloat", Factory.class); + public static final String[] PARAMETERS = new String[] {}; public abstract float execute(); } public void testReturnsPrimitiveFloat() throws Exception { - Compiler compiler = new Compiler(ReturnsPrimitiveFloat.class, null, null, painlessLookup); - - assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "1.1f", emptyMap()).newInstance()).execute(), 0); assertEquals(1.1f, - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "(float) 1.1d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveFloat0", "1.1f", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.1f, + scriptEngine.compile("testReturnsPrimitiveFloat1", "(float) 1.1d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.1f, - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "def d = 1.1f; d", emptyMap()).newInstance()).execute(), 0); - assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile( - compiler, null, "def d = Float.valueOf(1.1f); d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveFloat2", "def d = 1.1f; d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.1f, scriptEngine.compile( + "testReturnsPrimitiveFloat3", "def d = Float.valueOf(1.1f); d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.1f + 6.7f, - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "1.1f + 6.7f", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveFloat4", "1.1f + 6.7f", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); Exception e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "1.1d", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveFloat5", "1.1d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + 
.newInstance().execute()); assertEquals("Cannot cast from [double] to [float].", e.getMessage()); e = expectScriptThrows(ClassCastException.class, () -> - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "def d = 1.1d; d", emptyMap()).newInstance()).execute()); - e = expectScriptThrows(ClassCastException.class, () -> ((ReturnsPrimitiveFloat)scriptEngine.compile( - compiler, null, "def d = Double.valueOf(1.1); d", emptyMap()).newInstance()).execute()); + scriptEngine.compile("testReturnsPrimitiveFloat6", "def d = 1.1d; d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute()); + e = expectScriptThrows(ClassCastException.class, () -> scriptEngine.compile( + "testReturnsPrimitiveFloat7", "def d = Double.valueOf(1.1); d", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute()); String debug = Debugger.toString(ReturnsPrimitiveFloat.class, "1f", new CompilerSettings()); assertThat(debug, containsString("FCONST_1")); @@ -374,41 +499,62 @@ public void testReturnsPrimitiveFloat() throws Exception { assertThat(debug, containsString("FRETURN")); assertEquals(0.0f, - ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "int i = 0", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveFloat8", "int i = 0", ReturnsPrimitiveFloat.CONTEXT, emptyMap()) + .newInstance().execute(), 0); } - public abstract static class ReturnsPrimitiveDouble { + public abstract static class ReturnsPrimitiveDouble { + public interface Factory { + ReturnsPrimitiveDouble newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("returnsprimitivedouble", Factory.class); + public static final String[] PARAMETERS = new String[] {}; public abstract double execute(); } public void testReturnsPrimitiveDouble() throws Exception { - Compiler compiler = new Compiler(ReturnsPrimitiveDouble.class, null, null, painlessLookup); - - assertEquals(1.0, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1", emptyMap()).newInstance()).execute(), 0); - assertEquals(1.0, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1L", emptyMap()).newInstance()).execute(), 0); - assertEquals(1.1, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1.1d", emptyMap()).newInstance()).execute(), 0); + assertEquals(1.0, + scriptEngine.compile("testReturnsPrimitiveDouble0", "1", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.0, + scriptEngine.compile("testReturnsPrimitiveDouble1", "1L", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.1, + scriptEngine.compile("testReturnsPrimitiveDouble2", "1.1d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals((double) 1.1f, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1.1f", emptyMap()).newInstance()).execute(), 0); - assertEquals(1.1, ((ReturnsPrimitiveDouble)scriptEngine.compile( - compiler, null, "Double.valueOf(1.1)", emptyMap()).newInstance()).execute(), 0); - assertEquals((double) 1.1f, ((ReturnsPrimitiveDouble)scriptEngine.compile( - compiler, null, "Float.valueOf(1.1f)", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble3", "1.1f", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.1, scriptEngine.compile( + "testReturnsPrimitiveDouble4", "Double.valueOf(1.1)", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + 
assertEquals((double) 1.1f, scriptEngine.compile( + "testReturnsPrimitiveDouble5", "Float.valueOf(1.1f)", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.0, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "def d = 1; d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble6", "def d = 1; d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.0, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "def d = 1L; d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble7", "def d = 1L; d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.1, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "def d = 1.1d; d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble8", "def d = 1.1d; d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()). + newInstance().execute(), 0); assertEquals((double) 1.1f, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "def d = 1.1f; d", emptyMap()).newInstance()).execute(), 0); - assertEquals(1.1, ((ReturnsPrimitiveDouble)scriptEngine.compile( - compiler, null, "def d = Double.valueOf(1.1); d", emptyMap()).newInstance()).execute(), 0); - assertEquals((double) 1.1f, ((ReturnsPrimitiveDouble)scriptEngine.compile( - compiler, null, "def d = Float.valueOf(1.1f); d", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble9", "def d = 1.1f; d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals(1.1, scriptEngine.compile( + "testReturnsPrimitiveDouble10", "def d = Double.valueOf(1.1); d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); + assertEquals((double) 1.1f, scriptEngine.compile( + "testReturnsPrimitiveDouble11", "def d = Float.valueOf(1.1f); d", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); assertEquals(1.1 + 6.7, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1.1 + 6.7", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble12", "1.1 + 6.7", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); String debug = Debugger.toString(ReturnsPrimitiveDouble.class, "1", new CompilerSettings()); assertThat(debug, containsString("DCONST_1")); @@ -416,90 +562,126 @@ public void testReturnsPrimitiveDouble() throws Exception { assertThat(debug, containsString("DRETURN")); assertEquals(0.0, - ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "int i = 0", emptyMap()).newInstance()).execute(), 0); + scriptEngine.compile("testReturnsPrimitiveDouble13", "int i = 0", ReturnsPrimitiveDouble.CONTEXT, emptyMap()) + .newInstance().execute(), 0); } - public abstract static class NoArgumentsConstant { + public abstract static class NoArgsConstant { + public interface Factory { + NoArgsConstant newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("noargsconstant", Factory.class); + public abstract Object execute(String foo); } - public void testNoArgumentsConstant() { - Compiler compiler = new Compiler(NoArgumentsConstant.class, null, null, painlessLookup); + public void testNoArgsConstant() { Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); + 
scriptEngine.compile("testNoArgsConstant0", "1", NoArgsConstant.CONTEXT, emptyMap()).newInstance().execute("constant")); assertThat(e.getMessage(), startsWith( "Painless needs a constant [String[] PARAMETERS] on all interfaces it implements with the " - + "names of the method arguments but [" + NoArgumentsConstant.class.getName() + "] doesn't have one.")); + + "names of the method arguments but [" + NoArgsConstant.class.getName() + "] doesn't have one.")); } - public abstract static class WrongArgumentsConstant { + public abstract static class WrongArgsConstant { + public interface Factory { + WrongArgsConstant newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("wrongargscontext", Factory.class); + boolean[] PARAMETERS = new boolean[] {false}; public abstract Object execute(String foo); } - public void testWrongArgumentsConstant() { - Compiler compiler = new Compiler(WrongArgumentsConstant.class, null, null, painlessLookup); + public void testWrongArgsConstant() { Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); + scriptEngine.compile("testWrongArgsConstant0", "1", WrongArgsConstant.CONTEXT, emptyMap())); assertThat(e.getMessage(), startsWith( "Painless needs a constant [String[] PARAMETERS] on all interfaces it implements with the " - + "names of the method arguments but [" + WrongArgumentsConstant.class.getName() + "] doesn't have one.")); + + "names of the method arguments but [" + WrongArgsConstant.class.getName() + "] doesn't have one.")); } - public abstract static class WrongLengthOfArgumentConstant { + public abstract static class WrongLengthOfArgConstant { + public interface Factory { + WrongLengthOfArgConstant newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("wronglengthofargcontext", Factory.class); + public static final String[] PARAMETERS = new String[] {"foo", "bar"}; public abstract Object execute(String foo); } - public void testWrongLengthOfArgumentConstant() { - Compiler compiler = new Compiler(WrongLengthOfArgumentConstant.class, null, null, painlessLookup); + public void testWrongLengthOfArgConstant() { Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); - assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but [" - + WrongLengthOfArgumentConstant.class.getName() + "#execute] takes [1] argument.")); + scriptEngine.compile("testWrongLengthOfArgConstant", "1", WrongLengthOfArgConstant.CONTEXT, emptyMap())); + assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgConstant.class.getName() + "#ARGUMENTS] has length [2] but [" + + WrongLengthOfArgConstant.class.getName() + "#execute] takes [1] argument.")); } public abstract static class UnknownArgType { + public interface Factory { + UnknownArgType newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("unknownargtype", Factory.class); + public static final String[] PARAMETERS = new String[] {"foo"}; public abstract Object execute(UnknownArgType foo); } public void testUnknownArgType() { - Compiler compiler = new Compiler(UnknownArgType.class, null, null, painlessLookup); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); + scriptEngine.compile("testUnknownArgType0", "1", UnknownArgType.CONTEXT, 
emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". Painless interfaces can only accept arguments " + "that are of whitelisted types.", e.getMessage()); } public abstract static class UnknownReturnType { + public interface Factory { + UnknownReturnType newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("unknownreturntype", Factory.class); + public static final String[] PARAMETERS = new String[] {"foo"}; public abstract UnknownReturnType execute(String foo); } public void testUnknownReturnType() { - Compiler compiler = new Compiler(UnknownReturnType.class, null, null, painlessLookup); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); + scriptEngine.compile("testUnknownReturnType0", "1", UnknownReturnType.CONTEXT, emptyMap())); assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName() + "#execute] returns [" + UnknownReturnType.class.getName() + "] which isn't whitelisted.", e.getMessage()); } public abstract static class UnknownArgTypeInArray { + public interface Factory { + UnknownArgTypeInArray newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("unknownargtypeinarray", Factory.class); + public static final String[] PARAMETERS = new String[] {"foo"}; public abstract Object execute(UnknownArgTypeInArray[] foo); } public void testUnknownArgTypeInArray() { - Compiler compiler = new Compiler(UnknownArgTypeInArray.class, null, null, painlessLookup); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "1", emptyMap())); + scriptEngine.compile("testUnknownArgTypeInArray0", "1", UnknownArgTypeInArray.CONTEXT, emptyMap())); assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ".
Painless interfaces can only accept " + "arguments that are of whitelisted types.", e.getMessage()); } public abstract static class TwoExecuteMethods { + public interface Factory { + TwoExecuteMethods newInstance(); + } + + public static final ScriptContext CONTEXT = new ScriptContext<>("twoexecutemethods", Factory.class); + public abstract Object execute(); public abstract Object execute(boolean foo); } public void testTwoExecuteMethods() { - Compiler compiler = new Compiler(TwoExecuteMethods.class, null, null, painlessLookup); Exception e = expectScriptThrows(IllegalArgumentException.class, false, () -> - scriptEngine.compile(compiler, null, "null", emptyMap())); + scriptEngine.compile("testTwoExecuteMethods0", "null", TwoExecuteMethods.CONTEXT, emptyMap())); assertEquals("Painless can only implement interfaces that have a single method named [execute] but [" + TwoExecuteMethods.class.getName() + "] has more than one.", e.getMessage()); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java index 15eed75bcb8df..f14b270151c67 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ContextExampleTests.java @@ -19,6 +19,11 @@ package org.elasticsearch.painless; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.singletonMap; + /** * These tests run the Painless scripts used in the context docs against * slightly modified data designed around unit tests rather than a fully- @@ -308,4 +313,51 @@ public void testIngestProcessorScript() { curl -XPOST localhost:9200/seats/seat/_bulk?pipeline=seats -H "Content-Type: application/x-ndjson" --data-binary "@/home/jdconrad/test/seats.json" */ + + + // Use script_fields API to add two extra fields to the hits + + /* + curl -X GET localhost:9200/seats/_search + { + "query" : { + "match_all": {} + }, + "script_fields" : { + "day-of-week" : { + "script" : { + "source": "doc['datetime'].value.getDayOfWeek()" + } + }, + "number-of-actors" : { + "script" : { + "source": "params['_source']['actors'].length" + } + } + } + } + */ + + + // Tests params only; script doc values are not easily exercised from a Painless unit test + public void testScriptFieldsScript() { + Map hit = new HashMap<>(); + Map fields = new HashMap<>(); + fields.put("number-of-actors", 4); + hit.put("fields", fields); + + Map source = new HashMap<>(); + String[] actors = {"James Holland", "Krissy Smith", "Joe Muir", "Ryan Earns"}; + source.put("actors", actors); + + assertEquals(hit, exec( + "Map fields = new HashMap();" + + "fields[\"number-of-actors\"] = params['_source']['actors'].length;" + + "Map rtn = new HashMap();" + + "rtn[\"fields\"] = fields;" + + "return rtn;", + singletonMap("_source", source), true) + ); + } } + diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index d95c9899c89ad..67e0fad53ec49 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -75,7 +75,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field");
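Stepping back to the ContextExampleTests addition above: the new test exercises the same two scripts as the documented script_fields request. As a complement, a sketch of building that request with the regular Java search API (standard SearchSourceBuilder and Script calls; the index and field names come from the curl example, everything else is assumed):

```java
// Sketch: the script_fields request from the curl example, built client-side.
SearchSourceBuilder source = new SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .scriptField("day-of-week",
                new Script("doc['datetime'].value.getDayOfWeek()"))
        .scriptField("number-of-actors",
                new Script("params['_source']['actors'].length"));
SearchRequest request = new SearchRequest("seats").source(source);
```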
assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; @@ -149,7 +149,7 @@ public void testNoDocValues() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); } @@ -173,7 +173,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -202,7 +202,7 @@ public void testCoerce() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -317,7 +317,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointDataDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(25, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/ParentJoinPlugin.java b/modules/parent-join/src/main/java/org/elasticsearch/join/ParentJoinPlugin.java index 2236662b5d54b..e2b84333447b4 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/ParentJoinPlugin.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/ParentJoinPlugin.java @@ -22,6 +22,8 @@ import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder; import org.elasticsearch.join.aggregations.InternalChildren; +import org.elasticsearch.join.aggregations.InternalParent; +import org.elasticsearch.join.aggregations.ParentAggregationBuilder; import org.elasticsearch.join.mapper.ParentJoinFieldMapper; import org.elasticsearch.join.query.HasChildQueryBuilder; import org.elasticsearch.join.query.HasParentQueryBuilder; @@ -51,9 +53,11 @@ public List<QuerySpec<?>> getQueries() { @Override public List<AggregationSpec> getAggregations() { - return Collections.singletonList( - new AggregationSpec(ChildrenAggregationBuilder.NAME, ChildrenAggregationBuilder::new, ChildrenAggregationBuilder::parse) - .addResultReader(InternalChildren::new) + return Arrays.asList( + new AggregationSpec(ChildrenAggregationBuilder.NAME, ChildrenAggregationBuilder::new, ChildrenAggregationBuilder::parse)
+ .addResultReader(InternalChildren::new), + new AggregationSpec(ParentAggregationBuilder.NAME, ParentAggregationBuilder::new, ParentAggregationBuilder::parse) + .addResultReader(InternalParent::new) ); } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java new file mode 100644 index 0000000000000..8c2ac5373b4b4 --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregator.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * A {@link BucketsAggregator} which resolves to the matching parent documents. 
+ */ +public class ChildrenToParentAggregator extends ParentJoinAggregator { + + static final ParseField TYPE_FIELD = new ParseField("type"); + + public ChildrenToParentAggregator(String name, AggregatorFactories factories, + SearchContext context, Aggregator parent, Query childFilter, + Query parentFilter, ValuesSource.Bytes.WithOrdinals valuesSource, + long maxOrd, List pipelineAggregators, Map metaData) throws IOException { + super(name, factories, context, parent, childFilter, parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + } + + @Override + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + return new InternalParent(name, bucketDocCount(owningBucketOrdinal), + bucketAggregations(owningBucketOrdinal), pipelineAggregators(), metaData()); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + return new InternalParent(name, 0, buildEmptySubAggregations(), pipelineAggregators(), + metaData()); + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/InternalParent.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/InternalParent.java new file mode 100644 index 0000000000000..f61589b1d9aaf --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/InternalParent.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * Results of the {@link ChildrenToParentAggregator}. + */ +public class InternalParent extends InternalSingleBucketAggregation implements Parent { + public InternalParent(String name, long docCount, InternalAggregations aggregations, List pipelineAggregators, + Map metaData) { + super(name, docCount, aggregations, pipelineAggregators, metaData); + } + + /** + * Read from a stream. 
+ */ + public InternalParent(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getWriteableName() { + return ParentAggregationBuilder.NAME; + } + + @Override + protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { + return new InternalParent(name, docCount, subAggregations, pipelineAggregators(), getMetaData()); + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationBuilders.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationBuilders.java index 73522a68b4595..4d4708cbcbe9b 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationBuilders.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationBuilders.java @@ -26,4 +26,11 @@ public abstract class JoinAggregationBuilders { public static ChildrenAggregationBuilder children(String name, String childType) { return new ChildrenAggregationBuilder(name, childType); } + + /** + * Create a new {@link Parent} aggregation with the given name. + */ + public static ParentAggregationBuilder parent(String name, String childType) { + return new ParentAggregationBuilder(name, childType); + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/Parent.java similarity index 73% rename from server/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java rename to modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/Parent.java index af6ea851803e3..1942798b51338 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/Parent.java @@ -17,13 +17,12 @@ * under the License. */ -package org.elasticsearch.monitor.os; +package org.elasticsearch.join.aggregations; -public class DummyOsInfo extends OsInfo { +import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregation; - private DummyOsInfo() { - super(0, 0, 0, "dummy_name", "dummy_arch", "dummy_version"); - } - - public static final DummyOsInfo INSTANCE = new DummyOsInfo(); +/** + * A single bucket aggregation that translates child documents to their parent documents. + */ +public interface Parent extends SingleBucketAggregation { } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java new file mode 100644 index 0000000000000..495a5c0f9ad3f --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.plain.SortedSetDVOrdinalsIndexFieldData; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.join.mapper.ParentIdFieldMapper; +import org.elasticsearch.join.mapper.ParentJoinFieldMapper; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.support.FieldContext; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +public class ParentAggregationBuilder + extends ValuesSourceAggregationBuilder { + + public static final String NAME = "parent"; + + private final String childType; + private Query parentFilter; + private Query childFilter; + + /** + * @param name + * the name of this aggregation + * @param childType + * the type of children documents + */ + public ParentAggregationBuilder(String name, String childType) { + super(name, ValuesSourceType.BYTES, ValueType.STRING); + if (childType == null) { + throw new IllegalArgumentException("[childType] must not be null: [" + name + "]"); + } + this.childType = childType; + } + + protected ParentAggregationBuilder(ParentAggregationBuilder clone, + Builder factoriesBuilder, Map metaData) { + super(clone, factoriesBuilder, metaData); + this.childType = clone.childType; + this.childFilter = clone.childFilter; + this.parentFilter = clone.parentFilter; + } + + @Override + protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metaData) { + return new ParentAggregationBuilder(this, factoriesBuilder, metaData); + } + + /** + * Read from a stream. 
+ */ + public ParentAggregationBuilder(StreamInput in) throws IOException { + super(in, ValuesSourceType.BYTES, ValueType.STRING); + childType = in.readString(); + } + + @Override + protected void innerWriteTo(StreamOutput out) throws IOException { + out.writeString(childType); + } + + @Override + protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, + ValuesSourceConfig config, + AggregatorFactory parent, + Builder subFactoriesBuilder) throws IOException { + return new ParentAggregatorFactory(name, config, childFilter, parentFilter, context, parent, + subFactoriesBuilder, metaData); + } + + @Override + protected ValuesSourceConfig resolveConfig(SearchContext context) { + ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSourceType.BYTES); + joinFieldResolveConfig(context, config); + return config; + } + + private void joinFieldResolveConfig(SearchContext context, ValuesSourceConfig config) { + ParentJoinFieldMapper parentJoinFieldMapper = ParentJoinFieldMapper.getMapper(context.mapperService()); + ParentIdFieldMapper parentIdFieldMapper = parentJoinFieldMapper.getParentIdFieldMapper(childType, false); + if (parentIdFieldMapper != null) { + parentFilter = parentIdFieldMapper.getParentFilter(); + childFilter = parentIdFieldMapper.getChildFilter(childType); + MappedFieldType fieldType = parentIdFieldMapper.fieldType(); + final SortedSetDVOrdinalsIndexFieldData fieldData = context.getForField(fieldType); + config.fieldContext(new FieldContext(fieldType.name(), fieldData, fieldType)); + } else { + config.unmapped(true); + } + } + + @Override + protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(ChildrenToParentAggregator.TYPE_FIELD.getPreferredName(), childType); + return builder; + } + + public static ParentAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + String childType = null; + + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if ("type".equals(currentFieldName)) { + childType = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "Unexpected token " + token + " in [" + aggregationName + "]."); + } + } + + if (childType == null) { + throw new ParsingException(parser.getTokenLocation(), + "Missing [child_type] field for parent aggregation [" + aggregationName + "]"); + } + + return new ParentAggregationBuilder(aggregationName, childType); + } + + @Override + protected int innerHashCode() { + return Objects.hash(childType); + } + + @Override + protected boolean innerEquals(Object obj) { + ParentAggregationBuilder other = (ParentAggregationBuilder) obj; + return Objects.equals(childType, other.childType); + } + + @Override + public String getType() { + return NAME; + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java new file mode 100644 index 0000000000000..2ae3da7c47af3 --- /dev/null +++ 
b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregatorFactory.java @@ -0,0 +1,83 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.search.Query; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class ParentAggregatorFactory extends ValuesSourceAggregatorFactory { + + private final Query parentFilter; + private final Query childFilter; + + public ParentAggregatorFactory(String name, + ValuesSourceConfig config, + Query childFilter, + Query parentFilter, + SearchContext context, + AggregatorFactory parent, + AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metaData); + + this.childFilter = childFilter; + this.parentFilter = parentFilter; + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + return new NonCollectingAggregator(name, context, parent, pipelineAggregators, metaData) { + @Override + public InternalAggregation buildEmptyAggregation() { + return new InternalParent(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); + } + }; + } + + @Override + protected Aggregator doCreateInternal(WithOrdinals valuesSource, + Aggregator children, + boolean collectsFromSingleBucket, + List pipelineAggregators, + Map metaData) throws IOException { + + long maxOrd = valuesSource.globalMaxOrd(context.searcher()); + if (collectsFromSingleBucket) { + return new ChildrenToParentAggregator(name, factories, context, children, childFilter, + parentFilter, valuesSource, maxOrd, pipelineAggregators, metaData); + } else { + return asMultiBucketAggregator(this, context, children); + } + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java index 
3990e8697ef63..813c8aeaf3a2d 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregator.java @@ -53,5 +53,4 @@ public InternalAggregation buildEmptyAggregation() { return new InternalChildren(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData()); } - } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParsedParent.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParsedParent.java new file mode 100644 index 0000000000000..40393c8323505 --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParsedParent.java @@ -0,0 +1,36 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.join.aggregations; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; + +import java.io.IOException; + +public class ParsedParent extends ParsedSingleBucketAggregation implements Parent { + + @Override + public String getType() { + return ParentAggregationBuilder.NAME; + } + + public static ParsedParent fromXContent(XContentParser parser, final String name) throws IOException { + return parseXContent(parser, new ParsedParent(), name); + } +} diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java b/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java index 250241014613c..03924471d5a91 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/spi/ParentJoinNamedXContentProvider.java @@ -23,20 +23,26 @@ import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.join.aggregations.ChildrenAggregationBuilder; +import org.elasticsearch.join.aggregations.ParentAggregationBuilder; import org.elasticsearch.join.aggregations.ParsedChildren; +import org.elasticsearch.join.aggregations.ParsedParent; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.search.aggregations.Aggregation; +import java.util.Arrays; import java.util.List; -import static java.util.Collections.singletonList; - public class ParentJoinNamedXContentProvider implements NamedXContentProvider { @Override public List getNamedXContentParsers() { - ParseField parseField = new ParseField(ChildrenAggregationBuilder.NAME); - ContextParser contextParser = (p, name) -> ParsedChildren.fromXContent(p, (String) name); - 
return singletonList(new NamedXContentRegistry.Entry(Aggregation.class, parseField, contextParser)); + ParseField parseFieldChildren = new ParseField(ChildrenAggregationBuilder.NAME); + ParseField parseFieldParent = new ParseField(ParentAggregationBuilder.NAME); + ContextParser contextParserChildren = (p, name) -> ParsedChildren.fromXContent(p, (String) name); + ContextParser contextParserParent = (p, name) -> ParsedParent.fromXContent(p, (String) name); + return Arrays.asList( + new NamedXContentRegistry.Entry(Aggregation.class, parseFieldChildren, contextParserChildren), + new NamedXContentRegistry.Entry(Aggregation.class, parseFieldParent, contextParserParent) + ); } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/AbstractParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/AbstractParentChildTestCase.java new file mode 100644 index 0000000000000..3dc14b2f9c47b --- /dev/null +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/AbstractParentChildTestCase.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.join.aggregations; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.join.query.ParentChildTestCase; +import org.junit.Before; + +/** + * Base test class that shares the index setup used by the Children and Parent aggregation tests + */ +public abstract class AbstractParentChildTestCase extends ParentChildTestCase { + protected final Map<String, Control> categoryToControl = new HashMap<>(); + protected final Map<String, ParentControl> articleToControl = new HashMap<>(); + + @Before + public void setupCluster() throws Exception { + assertAcked( + prepareCreate("test") + .addMapping("doc", + addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"), + "commenter", "keyword", "category", "keyword")) + ); + + List<IndexRequestBuilder> requests = new ArrayList<>(); + String[] uniqueCategories = new String[randomIntBetween(1, 25)]; + for (int i = 0; i < uniqueCategories.length; i++) { + uniqueCategories[i] = Integer.toString(i); + } + int catIndex = 0; + + int numParentDocs = randomIntBetween(uniqueCategories.length, uniqueCategories.length * 5); + for (int i = 0; i < numParentDocs; i++) { + String id = "article-" + i; + + // TODO: this array is always of length 1, and testChildrenAggs fails if this is changed + String[] categories = new String[randomIntBetween(1,1)]; + for (int j = 0; j < categories.length; j++) { + String category = categories[j] = uniqueCategories[catIndex++ % uniqueCategories.length]; + Control control = categoryToControl.computeIfAbsent(category, Control::new); + control.articleIds.add(id); + articleToControl.put(id, new ParentControl(category)); + } + + IndexRequestBuilder indexRequest = createIndexRequest("test", "article", id, null, "category", categories, "randomized", true); + requests.add(indexRequest); + } + + String[] commenters = new String[randomIntBetween(5, 50)]; + for (int i = 0; i < commenters.length; i++) { + commenters[i] = Integer.toString(i); + } + + int id = 0; + for (Control control : categoryToControl.values()) { + for (String articleId : control.articleIds) { + int numChildDocsPerParent = randomIntBetween(0, 5); + for (int i = 0; i < numChildDocsPerParent; i++) { + String commenter = commenters[id % commenters.length]; + String idValue = "comment-" + id++; + control.commentIds.add(idValue); + Set<String> ids = control.commenterToCommentId.computeIfAbsent(commenter, k -> new HashSet<>()); + ids.add(idValue); + + articleToControl.get(articleId).commentIds.add(idValue); + + IndexRequestBuilder indexRequest = createIndexRequest("test", "comment", idValue, + articleId, "commenter", commenter, "randomized", true); + requests.add(indexRequest); + } + } + } + + requests.add(createIndexRequest("test", "article", "a", null, "category", new String[]{"a"}, "randomized", false)); + requests.add(createIndexRequest("test", "article", "b", null, "category", new String[]{"a", "b"}, "randomized", false)); + requests.add(createIndexRequest("test", "article", "c", null, "category", new String[]{"a", "b", "c"}, "randomized", false)); + requests.add(createIndexRequest("test", "article", "d", null, "category", new String[]{"c"}, "randomized", false)); + requests.add(createIndexRequest("test", "comment", "e", "a")); + requests.add(createIndexRequest("test", "comment", "f", "c")); + +
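Once this fixture is indexed (the indexRandom call follows below), the new parent aggregation can be exercised the same way ChildrenIT queries the children aggregation. A hedged sketch, using the JoinAggregationBuilders.parent entry point added earlier in this diff (the aggregation names "to_article" and "category_terms" are hypothetical):

```java
// Sketch: bucket comments under their parent articles, then break the matched
// articles down by category. Runs against the "test" index built above.
SearchResponse response = client().prepareSearch("test")
        .addAggregation(JoinAggregationBuilders.parent("to_article", "comment")
                .subAggregation(AggregationBuilders.terms("category_terms").field("category")))
        .get();
Parent toArticle = response.getAggregations().get("to_article");
long matchedArticles = toArticle.getDocCount(); // single bucket: all matched parents
```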
indexRandom(true, requests); + ensureSearchable("test"); + } + + + protected static final class Control { + + final String category; + final Set<String> articleIds = new HashSet<>(); + final Set<String> commentIds = new HashSet<>(); + final Map<String, Set<String>> commenterToCommentId = new HashMap<>(); + + private Control(String category) { + this.category = category; + } + } + + protected static final class ParentControl { + final String category; + final Set<String> commentIds = new HashSet<>(); + + private ParentControl(String category) { + this.category = category; + } + } +} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java index 61f00647f3c06..46008451736f9 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenIT.java @@ -25,7 +25,6 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.join.query.ParentChildTestCase; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -33,11 +32,8 @@ import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.TopHits; import org.elasticsearch.search.sort.SortOrder; -import org.junit.Before; import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -58,80 +54,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.sameInstance; -public class ChildrenIT extends ParentChildTestCase { - - - private static final Map<String, Control> categoryToControl = new HashMap<>(); - - - @Before - public void setupCluster() throws Exception { - categoryToControl.clear(); - assertAcked( - prepareCreate("test") - .addMapping("doc", - addFieldMappings(buildParentJoinFieldMappingFromSimplifiedDef("join_field", true, "article", "comment"), - "commenter", "keyword", "category", "keyword")) - ); - - List<IndexRequestBuilder> requests = new ArrayList<>(); - String[] uniqueCategories = new String[randomIntBetween(1, 25)]; - for (int i = 0; i < uniqueCategories.length; i++) { - uniqueCategories[i] = Integer.toString(i); - } - int catIndex = 0; - - int numParentDocs = randomIntBetween(uniqueCategories.length, uniqueCategories.length * 5); - for (int i = 0; i < numParentDocs; i++) { - String id = "article-" + i; - - // TODO: this array is always of length 1, and testChildrenAggs fails if this is changed - String[] categories = new String[randomIntBetween(1,1)]; - for (int j = 0; j < categories.length; j++) { - String category = categories[j] = uniqueCategories[catIndex++ % uniqueCategories.length]; - Control control = categoryToControl.get(category); - if (control == null) { - categoryToControl.put(category, control = new Control()); - } - control.articleIds.add(id); - } - - requests.add(createIndexRequest("test", "article", id, null, "category", categories, "randomized", true)); - } - - String[] commenters = new String[randomIntBetween(5, 50)]; - for (int i = 0; i < commenters.length; i++) { - commenters[i] = Integer.toString(i); - } - - int id = 0; - for (Control control : categoryToControl.values()) { - for (String articleId : control.articleIds) { - int numChildDocsPerParent =
randomIntBetween(0, 5); - for (int i = 0; i < numChildDocsPerParent; i++) { - String commenter = commenters[id % commenters.length]; - String idValue = "comment-" + id++; - control.commentIds.add(idValue); - Set<String> ids = control.commenterToCommentId.get(commenter); - if (ids == null) { - control.commenterToCommentId.put(commenter, ids = new HashSet<>()); - } - ids.add(idValue); - requests.add(createIndexRequest("test", "comment", idValue, articleId, "commenter", commenter)); - } - } - } - - requests.add(createIndexRequest("test", "article", "a", null, "category", new String[]{"a"}, "randomized", false)); - requests.add(createIndexRequest("test", "article", "b", null, "category", new String[]{"a", "b"}, "randomized", false)); - requests.add(createIndexRequest("test", "article", "c", null, "category", new String[]{"a", "b", "c"}, "randomized", false)); - requests.add(createIndexRequest("test", "article", "d", null, "category", new String[]{"c"}, "randomized", false)); - requests.add(createIndexRequest("test", "comment", "e", "a")); - requests.add(createIndexRequest("test", "comment", "f", "c")); - - indexRandom(true, requests); - ensureSearchable("test"); - } +public class ChildrenIT extends AbstractParentChildTestCase { public void testChildrenAggs() throws Exception { SearchResponse searchResponse = client().prepareSearch("test") @@ -455,10 +378,4 @@ public void testPostCollectAllLeafReaders() throws Exception { children = parents.getBuckets().get(0).getAggregations().get("child_docs"); assertThat(children.getDocCount(), equalTo(2L)); } - - private static final class Control { - final Set<String> articleIds = new HashSet<>(); - final Set<String> commentIds = new HashSet<>(); - final Map<String, Set<String>> commenterToCommentId = new HashMap<>(); - } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenTests.java index 85a97c4b9b413..58d315d2d43ed 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenTests.java @@ -37,8 +37,7 @@ protected Collection<Class<? extends Plugin>> getPlugins() { protected ChildrenAggregationBuilder createTestAggregatorBuilder() { String name = randomAlphaOfLengthBetween(3, 20); String childType = randomAlphaOfLengthBetween(5, 40); - ChildrenAggregationBuilder factory = new ChildrenAggregationBuilder(name, childType); - return factory; + return new ChildrenAggregationBuilder(name, childType); } } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java new file mode 100644 index 0000000000000..685c872fa72d4 --- /dev/null +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -0,0 +1,327 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedDocValuesField; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.join.mapper.MetaJoinFieldMapper; +import org.elasticsearch.join.mapper.ParentJoinFieldMapper; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.bucket.terms.LongTerms; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalMin; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValueType; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; +import java.util.function.Consumer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class ChildrenToParentAggregatorTests extends AggregatorTestCase { + + private static final String CHILD_TYPE = "child_type"; + private static final String PARENT_TYPE = "parent_type"; + + public void testNoDocs() throws IOException { + Directory directory = newDirectory(); + + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + // intentionally not writing any docs + indexWriter.close(); + IndexReader indexReader = DirectoryReader.open(directory); + + testCase(new MatchAllDocsQuery(), newSearcher(indexReader, false, true), childrenToParent -> { + assertEquals(0, childrenToParent.getDocCount()); + Aggregation parentAggregation = 
childrenToParent.getAggregations().get("in_parent"); + assertEquals(0, childrenToParent.getDocCount()); + assertNotNull("Aggregations: " + childrenToParent.getAggregations().asMap(), parentAggregation); + assertEquals(Double.POSITIVE_INFINITY, ((InternalMin) parentAggregation).getValue(), Double.MIN_VALUE); + }); + indexReader.close(); + directory.close(); + } + + public void testParentChild() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + + final Map<String, Tuple<Integer, Integer>> expectedParentChildRelations = setupIndex(indexWriter); + indexWriter.close(); + + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), + new ShardId(new Index("foo", "_na_"), 1)); + // TODO set "maybeWrap" to true for IndexSearcher once #23338 is resolved + IndexSearcher indexSearcher = newSearcher(indexReader, false, true); + + // verify with all documents + testCase(new MatchAllDocsQuery(), indexSearcher, parent -> { + int expectedTotalParents = 0; + int expectedMinValue = Integer.MAX_VALUE; + for (Tuple<Integer, Integer> expectedValues : expectedParentChildRelations.values()) { + expectedTotalParents++; + expectedMinValue = Math.min(expectedMinValue, expectedValues.v2()); + } + assertEquals("Having " + parent.getDocCount() + " docs and aggregation results: " + + parent.getAggregations().asMap(), + expectedTotalParents, parent.getDocCount()); + assertEquals(expectedMinValue, ((InternalMin) parent.getAggregations().get("in_parent")).getValue(), Double.MIN_VALUE); + }); + + // verify for each child + for (String parent : expectedParentChildRelations.keySet()) { + testCase(new TermInSetQuery(IdFieldMapper.NAME, Uid.encodeId("child0_" + parent)), + indexSearcher, aggregation -> { + assertEquals("Expected one result for min-aggregation for parent: " + parent + + ", but had aggregation-results: " + aggregation, + 1, aggregation.getDocCount()); + assertEquals(expectedParentChildRelations.get(parent).v2(), + ((InternalMin) aggregation.getAggregations().get("in_parent")).getValue(), Double.MIN_VALUE); + }); + } + + indexReader.close(); + directory.close(); + } + + + public void testParentChildTerms() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + + final Map<String, Tuple<Integer, Integer>> expectedParentChildRelations = setupIndex(indexWriter); + indexWriter.close(); + + SortedMap<Integer, Long> entries = new TreeMap<>(); + for (Tuple<Integer, Integer> value : expectedParentChildRelations.values()) { + Long l = entries.computeIfAbsent(value.v2(), integer -> 0L); + entries.put(value.v2(), l+1); + } + List<Map.Entry<Integer, Long>> sortedValues = new ArrayList<>(entries.entrySet()); + sortedValues.sort((o1, o2) -> { + // sort larger values first + int ret = o2.getValue().compareTo(o1.getValue()); + if (ret != 0) { + return ret; + } + + // on equal value, sort by key + return o1.getKey().compareTo(o2.getKey()); + }); + + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), + new ShardId(new Index("foo", "_na_"), 1)); + // TODO set "maybeWrap" to true for IndexSearcher once #23338 is resolved + IndexSearcher indexSearcher = newSearcher(indexReader, false, true); + + // verify a terms-aggregation inside the parent-aggregation + testCaseTerms(new MatchAllDocsQuery(), indexSearcher, parent -> { + assertNotNull(parent); + LongTerms valueTerms = parent.getAggregations().get("value_terms"); + assertNotNull(valueTerms); + + List<LongTerms.Bucket> valueTermsBuckets = valueTerms.getBuckets(); +
assertNotNull(valueTermsBuckets); + assertEquals("Had: " + parent, sortedValues.size(), valueTermsBuckets.size()); + int i = 0; + for (Map.Entry<Integer, Long> entry : sortedValues) { + LongTerms.Bucket bucket = valueTermsBuckets.get(i); + assertEquals(entry.getKey().longValue(), bucket.getKeyAsNumber()); + assertEquals(entry.getValue(), (Long)bucket.getDocCount()); + + i++; + } + }); + + indexReader.close(); + directory.close(); + } + + public void testTermsParentChildTerms() throws IOException { + Directory directory = newDirectory(); + RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); + + final Map<String, Tuple<Integer, Integer>> expectedParentChildRelations = setupIndex(indexWriter); + indexWriter.close(); + + SortedMap<Integer, Long> sortedValues = new TreeMap<>(); + for (Tuple<Integer, Integer> value : expectedParentChildRelations.values()) { + Long l = sortedValues.computeIfAbsent(value.v2(), integer -> 0L); + sortedValues.put(value.v2(), l+1); + } + + IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), + new ShardId(new Index("foo", "_na_"), 1)); + // TODO set "maybeWrap" to true for IndexSearcher once #23338 is resolved + IndexSearcher indexSearcher = newSearcher(indexReader, false, true); + + // verify a terms-aggregation inside the parent-aggregation which itself is inside a + // terms-aggregation on the child-documents + testCaseTermsParentTerms(new MatchAllDocsQuery(), indexSearcher, longTerms -> { + assertNotNull(longTerms); + + for (LongTerms.Bucket bucket : longTerms.getBuckets()) { + assertNotNull(bucket); + assertNotNull(bucket.getKeyAsString()); + } + }); + + indexReader.close(); + directory.close(); + } + + private static Map<String, Tuple<Integer, Integer>> setupIndex(RandomIndexWriter iw) throws IOException { + Map<String, Tuple<Integer, Integer>> expectedValues = new HashMap<>(); + int numParents = randomIntBetween(1, 10); + for (int i = 0; i < numParents; i++) { + String parent = "parent" + i; + int randomValue = randomIntBetween(0, 100); + List<Field> parentDocument = createParentDocument(parent, randomValue); + /*long parentDocId =*/ iw.addDocument(parentDocument); + //System.out.println("Parent: " + parent + ": " + randomValue + ", id: " + parentDocId); + int numChildren = randomIntBetween(1, 10); + int minValue = Integer.MAX_VALUE; + for (int c = 0; c < numChildren; c++) { + minValue = Math.min(minValue, randomValue); + int randomSubValue = randomIntBetween(0, 100); + List<Field> childDocument = createChildDocument("child" + c + "_" + parent, parent, randomSubValue); + /*long childDocId =*/ iw.addDocument(childDocument); + //System.out.println("Child: " + "child" + c + "_" + parent + ": " + randomSubValue + ", id: " + childDocId); + } + expectedValues.put(parent, new Tuple<>(numChildren, minValue)); + } + return expectedValues; + } + + private static List<Field> createParentDocument(String id, int value) { + return Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(id), Field.Store.NO), + new StringField("join_field", PARENT_TYPE, Field.Store.NO), + createJoinField(PARENT_TYPE, id), + new SortedNumericDocValuesField("number", value) + ); + } + + private static List<Field> createChildDocument(String childId, String parentId, int value) { + return Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(childId), Field.Store.NO), + new StringField("join_field", CHILD_TYPE, Field.Store.NO), + createJoinField(PARENT_TYPE, parentId), + new SortedNumericDocValuesField("subNumber", value) + ); + } + + private static SortedDocValuesField createJoinField(String parentType, String id) { + return new SortedDocValuesField("join_field#" + parentType,
new BytesRef(id)); + } + + @Override + protected MapperService mapperServiceMock() { + ParentJoinFieldMapper joinFieldMapper = createJoinFieldMapper(); + MapperService mapperService = mock(MapperService.class); + MetaJoinFieldMapper.MetaJoinFieldType metaJoinFieldType = mock(MetaJoinFieldMapper.MetaJoinFieldType.class); + when(metaJoinFieldType.getMapper()).thenReturn(joinFieldMapper); + when(mapperService.fullName("_parent_join")).thenReturn(metaJoinFieldType); + return mapperService; + } + + private static ParentJoinFieldMapper createJoinFieldMapper() { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); + return new ParentJoinFieldMapper.Builder("join_field") + .addParent(PARENT_TYPE, Collections.singleton(CHILD_TYPE)) + .build(new Mapper.BuilderContext(settings, new ContentPath(0))); + } + + private void testCase(Query query, IndexSearcher indexSearcher, Consumer<InternalParent> verify) + throws IOException { + + ParentAggregationBuilder aggregationBuilder = new ParentAggregationBuilder("_name", CHILD_TYPE); + aggregationBuilder.subAggregation(new MinAggregationBuilder("in_parent").field("number")); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + InternalParent result = search(indexSearcher, query, aggregationBuilder, fieldType); + verify.accept(result); + } + + private void testCaseTerms(Query query, IndexSearcher indexSearcher, Consumer<InternalParent> verify) + throws IOException { + + ParentAggregationBuilder aggregationBuilder = new ParentAggregationBuilder("_name", CHILD_TYPE); + aggregationBuilder.subAggregation(new TermsAggregationBuilder("value_terms", ValueType.LONG).field("number")); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + InternalParent result = search(indexSearcher, query, aggregationBuilder, fieldType); + verify.accept(result); + } + + // run a terms aggregation on the number in child-documents, then a parent aggregation and then terms on the parent-number + private void testCaseTermsParentTerms(Query query, IndexSearcher indexSearcher, Consumer<LongTerms> verify) + throws IOException { + AggregationBuilder aggregationBuilder = + new TermsAggregationBuilder("subvalue_terms", ValueType.LONG).field("subNumber"). + subAggregation(new ParentAggregationBuilder("to_parent", CHILD_TYPE). + subAggregation(new TermsAggregationBuilder("value_terms", ValueType.LONG).field("number"))); + + MappedFieldType fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + fieldType.setName("number"); + MappedFieldType subFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + subFieldType.setName("subNumber"); + LongTerms result = search(indexSearcher, query, aggregationBuilder, fieldType, subFieldType); + verify.accept(result); + } +} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/InternalParentTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/InternalParentTests.java new file mode 100644 index 0000000000000..be4792a867d33 --- /dev/null +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/InternalParentTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.common.xcontent.NamedXContentRegistry.Entry; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase; +import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +public class InternalParentTests extends InternalSingleBucketAggregationTestCase<InternalParent> { + + @Override + protected List<Entry> getNamedXContents() { + List<Entry> extendedNamedXContents = new ArrayList<>(super.getNamedXContents()); + extendedNamedXContents.add(new Entry(Aggregation.class, new ParseField(ParentAggregationBuilder.NAME), + (p, c) -> ParsedParent.fromXContent(p, (String) c))); + return extendedNamedXContents; + } + + @Override + protected InternalParent createTestInstance(String name, long docCount, InternalAggregations aggregations, + List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) { + return new InternalParent(name, docCount, aggregations, pipelineAggregators, metaData); + } + + @Override + protected void extraAssertReduced(InternalParent reduced, List<InternalParent> inputs) { + // Nothing extra to assert + } + + @Override + protected Reader<InternalParent> instanceReader() { + return InternalParent::new; + } + + @Override + protected Class<? extends ParsedSingleBucketAggregation> implementationClass() { + return ParsedParent.class; + } +} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentIT.java new file mode 100644 index 0000000000000..635195e62fe30 --- /dev/null +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentIT.java @@ -0,0 +1,238 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.join.aggregations; + +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.join.aggregations.JoinAggregationBuilders.parent; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.AggregationBuilders.topHits; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; + +public class ParentIT extends AbstractParentChildTestCase { + + public void testSimpleParentAgg() throws Exception { + final SearchRequestBuilder searchRequest = client().prepareSearch("test") + .setSize(10000) + .setQuery(matchQuery("randomized", true)) + .addAggregation( + parent("to_article", "comment") + .subAggregation( + terms("category").field("category").size(10000))); + SearchResponse searchResponse = searchRequest.get(); + assertSearchResponse(searchResponse); + + long articlesWithComment = articleToControl.values().stream().filter( + parentControl -> !parentControl.commentIds.isEmpty() + ).count(); + + Parent parentAgg = searchResponse.getAggregations().get("to_article"); + assertThat("Request: " + searchRequest + "\nResponse: " + searchResponse + "\n", + parentAgg.getDocCount(), equalTo(articlesWithComment)); + Terms categoryTerms = parentAgg.getAggregations().get("category"); + long categoriesWithComments = categoryToControl.values().stream().filter( + control -> !control.commentIds.isEmpty()).count(); + assertThat("Buckets: " + categoryTerms.getBuckets().stream().map( + (Function<MultiBucketsAggregation.Bucket, String>) MultiBucketsAggregation.Bucket::getKeyAsString).collect(Collectors.toList()) + + "\nCategories: " + categoryToControl.keySet(), + (long)categoryTerms.getBuckets().size(), equalTo(categoriesWithComments)); + for (Map.Entry<String, Control> entry : categoryToControl.entrySet()) { + // no children for this category -> no entry in the child to parent-aggregation + if(entry.getValue().commentIds.isEmpty()) { + assertNull(categoryTerms.getBucketByKey(entry.getKey())); + continue; + } + + final Terms.Bucket categoryBucket = categoryTerms.getBucketByKey(entry.getKey()); + assertNotNull("Failed for category " + entry.getKey(), + categoryBucket); + assertThat("Failed for category " + entry.getKey(), + categoryBucket.getKeyAsString(), equalTo(entry.getKey())); + + // count all articles in this category which have at least one comment + long articlesForCategory = articleToControl.values().stream(). + // only articles with this category + filter(parentControl -> parentControl.category.equals(entry.getKey())). + // only articles which have comments + filter(parentControl -> !parentControl.commentIds.isEmpty()).
+ count(); + assertThat("Failed for category " + entry.getKey(), + categoryBucket.getDocCount(), equalTo(articlesForCategory)); + } + } + + public void testParentAggs() throws Exception { + final SearchRequestBuilder searchRequest = client().prepareSearch("test") + .setSize(10000) + .setQuery(matchQuery("randomized", true)) + .addAggregation( + terms("to_commenter").field("commenter").size(10000).subAggregation( + parent("to_article", "comment").subAggregation( + terms("to_category").field("category").size(10000).subAggregation( + topHits("top_category") + )) + ) + ); + SearchResponse searchResponse = searchRequest.get(); + assertSearchResponse(searchResponse); + + final Set<String> commenters = getCommenters(); + final Map<String, Set<String>> commenterToComments = getCommenterToComments(); + + Terms categoryTerms = searchResponse.getAggregations().get("to_commenter"); + assertThat("Request: " + searchRequest + "\nResponse: " + searchResponse + "\n", + categoryTerms.getBuckets().size(), equalTo(commenters.size())); + for (Terms.Bucket commenterBucket : categoryTerms.getBuckets()) { + Set<String> comments = commenterToComments.get(commenterBucket.getKeyAsString()); + assertNotNull(comments); + assertThat("Failed for commenter " + commenterBucket.getKeyAsString(), + commenterBucket.getDocCount(), equalTo((long)comments.size())); + + Parent articleAgg = commenterBucket.getAggregations().get("to_article"); + assertThat(articleAgg.getName(), equalTo("to_article")); + // find all articles for the comments for the current commenter + Set<String> articles = articleToControl.values().stream().flatMap( + (Function<ParentControl, Stream<String>>) parentControl -> parentControl.commentIds.stream(). + filter(comments::contains) + ).collect(Collectors.toSet()); + + assertThat(articleAgg.getDocCount(), equalTo((long)articles.size())); + + Terms categoryAgg = articleAgg.getAggregations().get("to_category"); + assertNotNull(categoryAgg); + + List<String> categories = categoryToControl.entrySet(). + stream(). + filter(entry -> entry.getValue().commenterToCommentId.containsKey(commenterBucket.getKeyAsString())). + map(Map.Entry::getKey). + collect(Collectors.toList()); + + for (String category : categories) { + Terms.Bucket categoryBucket = categoryAgg.getBucketByKey(category); + assertNotNull(categoryBucket); + + Aggregation topCategory = categoryBucket.getAggregations().get("top_category"); + assertNotNull(topCategory); + } + } + + for (String commenter : commenters) { + Terms.Bucket categoryBucket = categoryTerms.getBucketByKey(commenter); + assertThat(categoryBucket.getKeyAsString(), equalTo(commenter)); + assertThat(categoryBucket.getDocCount(), equalTo((long) commenterToComments.get(commenter).size())); + + Parent childrenBucket = categoryBucket.getAggregations().get("to_article"); + assertThat(childrenBucket.getName(), equalTo("to_article")); + } + } + + private Set<String> getCommenters() { + return categoryToControl.values().stream().flatMap( + (Function<Control, Stream<String>>) control -> control.commenterToCommentId.keySet().stream()).
+ collect(Collectors.toSet()); + } + + private Map<String, Set<String>> getCommenterToComments() { + final Map<String, Set<String>> commenterToComments = new HashMap<>(); + for (Control control : categoryToControl.values()) { + for (Map.Entry<String, Set<String>> entry : control.commenterToCommentId.entrySet()) { + final Set<String> comments = commenterToComments.computeIfAbsent(entry.getKey(), s -> new HashSet<>()); + comments.addAll(entry.getValue()); + } + } + return commenterToComments; + } + + public void testNonExistingParentType() throws Exception { + SearchResponse searchResponse = client().prepareSearch("test") + .addAggregation( + parent("non-existing", "xyz") + ).get(); + assertSearchResponse(searchResponse); + + Parent parent = searchResponse.getAggregations().get("non-existing"); + assertThat(parent.getName(), equalTo("non-existing")); + assertThat(parent.getDocCount(), equalTo(0L)); + } + + public void testTermsParentAggTerms() throws Exception { + final SearchRequestBuilder searchRequest = client().prepareSearch("test") + .setSize(10000) + .setQuery(matchQuery("randomized", true)) + .addAggregation( + terms("to_commenter").field("commenter").size(10000).subAggregation( + parent("to_article", "comment").subAggregation( + terms("to_category").field("category").size(10000)))); + SearchResponse searchResponse = searchRequest.get(); + assertSearchResponse(searchResponse); + + final Set<String> commenters = getCommenters(); + final Map<String, Set<String>> commenterToComments = getCommenterToComments(); + + Terms commentersAgg = searchResponse.getAggregations().get("to_commenter"); + assertThat("Request: " + searchRequest + "\nResponse: " + searchResponse + "\n", + commentersAgg.getBuckets().size(), equalTo(commenters.size())); + for (Terms.Bucket commenterBucket : commentersAgg.getBuckets()) { + Set<String> comments = commenterToComments.get(commenterBucket.getKeyAsString()); + assertNotNull(comments); + assertThat("Failed for commenter " + commenterBucket.getKeyAsString(), + commenterBucket.getDocCount(), equalTo((long)comments.size())); + + Parent articleAgg = commenterBucket.getAggregations().get("to_article"); + assertThat(articleAgg.getName(), equalTo("to_article")); + // find all articles for the comments for the current commenter + Set<String> articles = articleToControl.values().stream().flatMap( + (Function<ParentControl, Stream<String>>) parentControl -> parentControl.commentIds.stream(). + filter(comments::contains) + ).collect(Collectors.toSet()); + + assertThat(articleAgg.getDocCount(), equalTo((long)articles.size())); + + Terms categoryAgg = articleAgg.getAggregations().get("to_category"); + assertNotNull(categoryAgg); + + List<String> categories = categoryToControl.entrySet(). + stream(). + filter(entry -> entry.getValue().commenterToCommentId.containsKey(commenterBucket.getKeyAsString())). + map(Map.Entry::getKey). + collect(Collectors.toList()); + + for (String category : categories) { + Terms.Bucket categoryBucket = categoryAgg.getBucketByKey(category); + assertNotNull(categoryBucket); + } + } + } +} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentTests.java new file mode 100644 index 0000000000000..1df36d28b49e5 --- /dev/null +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.join.aggregations; + +import java.util.Collection; +import java.util.Collections; + +import org.elasticsearch.join.ParentJoinPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; + +public class ParentTests extends BaseAggregationTestCase<ParentAggregationBuilder> { + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return Collections.singleton(ParentJoinPlugin.class); + } + + @Override + protected ParentAggregationBuilder createTestAggregatorBuilder() { + String name = randomAlphaOfLengthBetween(3, 20); + String parentType = randomAlphaOfLengthBetween(5, 40); + return new ParentAggregationBuilder(name, parentType); + } +} diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 4b46537bb1650..6ac073ef90a02 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -327,7 +327,7 @@ Tuple<List<BytesRef>, Map<String, List<byte[]>>> extractTermsAndRanges(IndexRead extractedTerms.add(builder.toBytesRef()); } } - if (info.getPointDimensionCount() == 1) { // not != 0 because range fields are not supported + if (info.getPointIndexDimensionCount() == 1) { // not != 0 because range fields are not supported PointValues values = reader.getPointValues(info.name); List<byte[]> encodedPointValues = new ArrayList<>(); encodedPointValues.add(values.getMinPackedValue().clone()); @@ -409,7 +409,15 @@ public void parse(ParseContext context) throws IOException { Version indexVersion = context.mapperService().getIndexSettings().getIndexVersionCreated(); createQueryBuilderField(indexVersion, queryBuilderField, queryBuilder, context); - Query query = toQuery(queryShardContext, isMapUnmappedFieldAsText(), queryBuilder); + + QueryBuilder queryBuilderForProcessing = queryBuilder.rewrite(new QueryShardContext(queryShardContext) { + + @Override + public boolean convertNowRangeToMatchAll() { + return true; + } + }); + Query query = toQuery(queryShardContext, isMapUnmappedFieldAsText(), queryBuilderForProcessing); processQuery(query, context); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 3d9a8fb8ebb08..07f47df41e60d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -38,7 +38,7 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.MultiDocValues; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import
org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PostingsEnum; @@ -1090,7 +1090,7 @@ private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryInd String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string"); logger.error("controlTopDocs.scoreDocs[{}].query_to_string={}", i, queryToString); - TermsEnum tenum = MultiFields.getFields(shardSearcher.getIndexReader()).terms(fieldType.queryTermsField.name()).iterator(); + TermsEnum tenum = MultiTerms.getTerms(shardSearcher.getIndexReader(), fieldType.queryTermsField.name()).iterator(); StringBuilder builder = new StringBuilder(); for (BytesRef term = tenum.next(); term != null; term = tenum.next()) { PostingsEnum penum = tenum.postings(null); diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java index b7693f514393b..90c456fee9a6d 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorQuerySearchTests.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.percolator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; @@ -26,10 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; @@ -47,9 +53,14 @@ import java.util.function.Function; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; +import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; +import static org.elasticsearch.index.query.QueryBuilders.scriptQuery; +import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; +import static org.hamcrest.Matchers.equalTo; public class PercolatorQuerySearchTests extends ESSingleNodeTestCase { @@ -221,4 +232,53 @@ public void testMapUnmappedFieldAsText() throws IOException { assertSearchHits(response, "1"); } + public void testRangeQueriesWithNow() throws Exception { + IndexService indexService = createIndex("test", Settings.builder().put("index.number_of_shards", 1).build(), "_doc", + "field1", "type=keyword", "field2", "type=date", "query", "type=percolator"); + + client().prepareIndex("test", "_doc", "1") + 
.setSource(jsonBuilder().startObject().field("query", rangeQuery("field2").from("now-1h").to("now+1h")).endObject()) + .get(); + client().prepareIndex("test", "_doc", "2") + .setSource(jsonBuilder().startObject().field("query", boolQuery() + .filter(termQuery("field1", "value")) + .filter(rangeQuery("field2").from("now-1h").to("now+1h")) + ).endObject()) + .get(); + + + Script script = new Script(ScriptType.INLINE, MockScriptPlugin.NAME, "1==1", Collections.emptyMap()); + client().prepareIndex("test", "_doc", "3") + .setSource(jsonBuilder().startObject().field("query", boolQuery() + .filter(scriptQuery(script)) + .filter(rangeQuery("field2").from("now-1h").to("now+1h")) + ).endObject()) + .get(); + client().admin().indices().prepareRefresh().get(); + + try (Engine.Searcher engineSearcher = indexService.getShard(0).acquireSearcher("test")) { + IndexSearcher indexSearcher = engineSearcher.searcher(); + long[] currentTime = new long[] {System.currentTimeMillis()}; + QueryShardContext queryShardContext = + indexService.newQueryShardContext(0, engineSearcher.reader(), () -> currentTime[0], null); + + BytesReference source = BytesReference.bytes(jsonBuilder().startObject() + .field("field1", "value") + .field("field2", currentTime[0]) + .endObject()); + QueryBuilder queryBuilder = new PercolateQueryBuilder("query", source, XContentType.JSON); + Query query = queryBuilder.toQuery(queryShardContext); + assertThat(indexSearcher.count(query), equalTo(3)); + + currentTime[0] = currentTime[0] + 10800000; // + 3 hours + source = BytesReference.bytes(jsonBuilder().startObject() + .field("field1", "value") + .field("field2", currentTime[0]) + .endObject()); + queryBuilder = new PercolateQueryBuilder("query", source, XContentType.JSON); + query = queryBuilder.toQuery(queryShardContext); + assertThat(indexSearcher.count(query), equalTo(3)); + } + } + } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java index 70afcc86ad8f9..c2c841f889aff 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/Netty4Plugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -81,8 +82,8 @@ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadP CircuitBreakerService circuitBreakerService, NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { - return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty4Transport(settings, threadPool, networkService, bigArrays, - namedWriteableRegistry, circuitBreakerService)); + return Collections.singletonMap(NETTY_TRANSPORT_NAME, () -> new Netty4Transport(settings, Version.CURRENT, threadPool, + networkService, bigArrays, namedWriteableRegistry, circuitBreakerService)); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java index bee98362e0c1e..af66b7c79881a 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java +++
b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java @@ -21,11 +21,15 @@ import io.netty.channel.Channel; import io.netty.channel.ChannelException; +import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelOption; import io.netty.channel.ChannelPromise; + import java.io.IOException; + import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.transport.TcpChannel; @@ -37,11 +41,13 @@ public class Netty4TcpChannel implements TcpChannel { private final Channel channel; private final String profile; + private final CompletableContext<Void> connectContext; private final CompletableContext<Void> closeContext = new CompletableContext<>(); - Netty4TcpChannel(Channel channel, String profile) { + Netty4TcpChannel(Channel channel, String profile, @Nullable ChannelFuture connectFuture) { this.channel = channel; this.profile = profile; + this.connectContext = new CompletableContext<>(); this.channel.closeFuture().addListener(f -> { if (f.isSuccess()) { closeContext.complete(null); @@ -55,6 +61,20 @@ public class Netty4TcpChannel implements TcpChannel { } } }); + + connectFuture.addListener(f -> { + if (f.isSuccess()) { + connectContext.complete(null); + } else { + Throwable cause = f.cause(); + if (cause instanceof Error) { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + connectContext.completeExceptionally(new Exception(cause)); + } else { + connectContext.completeExceptionally((Exception) cause); + } + } + }); } @Override @@ -72,6 +92,11 @@ public void addCloseListener(ActionListener<Void> listener) { closeContext.addListener(ActionListener.toBiConsumer(listener)); } + @Override + public void addConnectListener(ActionListener<Void> listener) { + connectContext.addListener(ActionListener.toBiConsumer(listener)); + } + @Override public void setSoLinger(int value) throws IOException { if (channel.isOpen()) { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index a4e5731cd6226..70b5b84609948 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -38,7 +38,7 @@ import io.netty.util.concurrent.Future; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -101,9 +101,9 @@ public class Netty4Transport extends TcpTransport { private volatile Bootstrap clientBootstrap; private volatile NioEventLoopGroup eventLoopGroup; - public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + public Netty4Transport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super("netty", settings, threadPool, bigArrays, circuitBreakerService,
namedWriteableRegistry, networkService); + super("netty", settings, version, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); this.workerCount = WORKER_COUNT.get(settings); @@ -169,9 +169,8 @@ private Bootstrap createClientBootstrap(NioEventLoopGroup eventLoopGroup) { private void createServerBootstrap(ProfileSettings profileSettings, NioEventLoopGroup eventLoopGroup) { String name = profileSettings.profileName; if (logger.isDebugEnabled()) { - logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " - + "receive_predictor[{}->{}]", - name, workerCount, profileSettings.portOrRange, profileSettings.bindHosts, profileSettings.publishHosts, compress, + logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], receive_predictor[{}->{}]", + name, workerCount, profileSettings.portOrRange, profileSettings.bindHosts, profileSettings.publishHosts, receivePredictorMin, receivePredictorMax); } @@ -216,37 +215,23 @@ protected ChannelHandler getClientChannelInitializer(DiscoveryNode node) { static final AttributeKey<Netty4TcpServerChannel> SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); @Override - protected Netty4TcpChannel initiateChannel(DiscoveryNode node, ActionListener<Void> listener) throws IOException { + protected Netty4TcpChannel initiateChannel(DiscoveryNode node) throws IOException { InetSocketAddress address = node.getAddress().address(); Bootstrap bootstrapWithHandler = clientBootstrap.clone(); bootstrapWithHandler.handler(getClientChannelInitializer(node)); bootstrapWithHandler.remoteAddress(address); - ChannelFuture channelFuture = bootstrapWithHandler.connect(); + ChannelFuture connectFuture = bootstrapWithHandler.connect(); - Channel channel = channelFuture.channel(); + Channel channel = connectFuture.channel(); if (channel == null) { - ExceptionsHelper.maybeDieOnAnotherThread(channelFuture.cause()); - throw new IOException(channelFuture.cause()); + ExceptionsHelper.maybeDieOnAnotherThread(connectFuture.cause()); + throw new IOException(connectFuture.cause()); } addClosedExceptionLogger(channel); - Netty4TcpChannel nettyChannel = new Netty4TcpChannel(channel, "default"); + Netty4TcpChannel nettyChannel = new Netty4TcpChannel(channel, "default", connectFuture); channel.attr(CHANNEL_KEY).set(nettyChannel); - channelFuture.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - return nettyChannel; } @@ -309,7 +294,7 @@ protected ServerChannelInitializer(String name) { @Override protected void initChannel(Channel ch) throws Exception { addClosedExceptionLogger(ch); - Netty4TcpChannel nettyTcpChannel = new Netty4TcpChannel(ch, name); + Netty4TcpChannel nettyTcpChannel = new Netty4TcpChannel(ch, name, ch.newSucceededFuture()); ch.attr(CHANNEL_KEY).set(nettyTcpChannel); ch.pipeline().addLast("logging", new ESLoggingHandler()); ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java index 0f3185add0833..0125b2c45c0b3
100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4ScheduledPingTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport.netty4; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -37,7 +38,6 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportResponseOptions; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -59,15 +59,15 @@ public void testScheduledPing() throws Exception { CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); NamedWriteableRegistry registry = new NamedWriteableRegistry(Collections.emptyList()); - final Netty4Transport nettyA = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), - BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); + final Netty4Transport nettyA = new Netty4Transport(settings, Version.CURRENT, threadPool, + new NetworkService(Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); serviceA.start(); serviceA.acceptIncomingRequests(); - final Netty4Transport nettyB = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), - BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); + final Netty4Transport nettyB = new Netty4Transport(settings, Version.CURRENT, threadPool, + new NetworkService(Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry, circuitBreakerService); MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR, null); @@ -90,7 +90,7 @@ public void testScheduledPing() throws Exception { serviceA.registerRequestHandler("internal:sayHello", TransportRequest.Empty::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { try { - channel.sendResponse(TransportResponse.Empty.INSTANCE, TransportResponseOptions.EMPTY); + channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (IOException e) { logger.error("Unexpected failure", e); fail(e.getMessage()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 564cf61a39569..a711bb690e366 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.netty4; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -65,7 +66,7 @@ public void startThreadPool() { threadPool = new ThreadPool(settings); NetworkService 
networkService = new NetworkService(Collections.emptyList()); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - nettyTransport = new Netty4Transport(settings, threadPool, networkService, bigArrays, + nettyTransport = new Netty4Transport(settings, Version.CURRENT, threadPool, networkService, bigArrays, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService()); nettyTransport.start(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java index b81c8efcb47ee..b93e09b53649e 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4TransportIT.java @@ -108,7 +108,7 @@ public ExceptionThrowingNetty4Transport( BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService); + super(settings, Version.CURRENT, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService); } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java index a49df3caaba4e..785c4cfb114bc 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/NettyTransportMultiPortTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.transport.netty4; +import org.elasticsearch.Version; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -118,7 +119,7 @@ public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exc private TcpTransport startTransport(Settings settings, ThreadPool threadPool) { BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); - TcpTransport transport = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), + TcpTransport transport = new Netty4Transport(settings, Version.CURRENT, threadPool, new NetworkService(Collections.emptyList()), bigArrays, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService()); transport.start(); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index e7faac8ae01db..006fbae6c42e4 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -20,13 +20,13 @@ package org.elasticsearch.transport.netty4; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; 
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; @@ -35,12 +35,12 @@ import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; -import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Collections; @@ -54,23 +54,18 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase public static MockTransportService nettyFromThreadPool(Settings settings, ThreadPool threadPool, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); - Transport transport = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()), + Transport transport = new Netty4Transport(settings, version, threadPool, new NetworkService(Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override - public Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, - InterruptedException { + public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, + ActionListener<Version> listener) { if (doHandshake) { - return super.executeHandshake(node, channel, timeout); + super.executeHandshake(node, channel, profile, listener); } else { - return version.minimumCompatibilityVersion(); + listener.onResponse(version.minimumCompatibilityVersion()); } } - - @Override - protected Version getCurrentVersion() { - return version; - } }; MockTransportService mockTransportService = MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..65e5ca3382240 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b474e1a2d7f0172338a08f159849a6c491781d70 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index abc772945b1b4..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -429eb7e780c5a6e5200041a1f5b98bccd2623aaf \ No newline at end of file diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java
b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java index 73bf92ee872a5..5c23d636dc4a0 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuNormalizerTokenFilterFactory.java @@ -65,7 +65,7 @@ static Normalizer2 wrapWithUnicodeSetFilter(final IndexSettings indexSettings, final Normalizer2 normalizer, final Settings settings) { String unicodeSetFilter = settings.get("unicodeSetFilter"); - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { if (unicodeSetFilter != null) { deprecationLogger.deprecated("[unicodeSetFilter] has been deprecated in favor of [unicode_set_filter]"); } else { diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..51fb0eebff73c --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +fc547e69837bcb808f1782bfa35490645bab9cae \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index e103c8c0c7c41..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -837fca1b1d7ca1dc002e53171801526644e52818 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3389dc2f73ea1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +e08961a2ec9414947693659ff79bb7e21a410298 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index b7a23ee518fcb..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1dde903172ade259cb26cbe320c25bc1d1356f89 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..b0854f657867a --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +09280919225656c7ce2a14af29666a02bd86c540 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 08b07e7c2f498..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -b6ca20e96a989e6e6706b8b7b8ad8c82d2a03576 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..00860c9fc832e --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +880f10393cdefff7575fbf5b2ced890666ec81dc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 3f6fed19af1aa..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c96a2f25dea18b383423a41aca296734353d4bbd \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..4818fd1665f27 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b41451a9d4e30b8a9a14ccdd7553e5796f77cf44 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 5dc03672c8753..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09363c5ce111d024a6da22a5ea8dbaf54d91dbd0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1b4f444999f58 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +145fd2c803d682c2cb2d78e6e350e09a09a09ea0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index e940b50d640e1..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -13c3840d49480014118de99ef6e07a9e55c50172 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index aa619409c16eb..98f2febd79516 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.discovery.ec2; import com.amazonaws.services.ec2.model.Tag; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import 
org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -74,8 +73,7 @@ public static void stopThreadPool() throws InterruptedException { public void createTransportService() { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); final Transport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE, - new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList()), - Version.CURRENT) { + new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList())) { @Override public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws UnknownHostException { // we just need to ensure we don't resolve DNS here diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index c9448dd88e756..79fefbc64d407 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -20,12 +20,9 @@ package org.elasticsearch.index.mapper.annotatedtext; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.Analyzer.TokenStreamComponents; import org.apache.lucene.analysis.AnalyzerWrapper; import org.apache.lucene.analysis.TokenFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; @@ -69,20 +66,21 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; /** A {@link FieldMapper} for full-text fields with annotation markup e.g. - * + * * "New mayor is [John Smith](type=person&value=John%20Smith) " - * + * * A special Analyzer wraps the default choice of analyzer in order * to strip the text field of annotation markup and inject the related * entity annotation tokens as supplementary tokens at the relevant points * in the token stream. - * This code is largely a copy of TextFieldMapper which is less than ideal - + * This code is largely a copy of TextFieldMapper which is less than ideal - * my attempts to subclass TextFieldMapper failed but we can revisit this. 
**/ public class AnnotatedTextFieldMapper extends FieldMapper { @@ -100,7 +98,7 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private int positionIncrementGap = POSITION_INCREMENT_GAP_USE_ANALYZER; - + public Builder(String name) { super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; @@ -118,7 +116,7 @@ public Builder positionIncrementGap(int positionIncrementGap) { this.positionIncrementGap = positionIncrementGap; return this; } - + @Override public Builder docValues(boolean docValues) { if (docValues) { @@ -141,8 +139,8 @@ public AnnotatedTextFieldMapper build(BuilderContext context) { fieldType.setSearchAnalyzer(new NamedAnalyzer(fieldType.searchAnalyzer(), positionIncrementGap)); fieldType.setSearchQuoteAnalyzer(new NamedAnalyzer(fieldType.searchQuoteAnalyzer(), positionIncrementGap)); } else { - //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory - // does to splice in new default of posIncGap=100 by wrapping the analyzer + //Using the analyzer's default BUT need to do the same thing AnalysisRegistry.processAnalyzerFactory + // does to splice in new default of posIncGap=100 by wrapping the analyzer if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) { int overrideInc = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP; fieldType.setIndexAnalyzer(new NamedAnalyzer(fieldType.indexAnalyzer(), overrideInc)); @@ -162,7 +160,7 @@ public static class TypeParser implements Mapper.TypeParser { public Mapper.Builder parse( String fieldName, Map node, ParserContext parserContext) throws MapperParsingException { AnnotatedTextFieldMapper.Builder builder = new AnnotatedTextFieldMapper.Builder(fieldName); - + builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer()); builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer()); @@ -181,7 +179,7 @@ public Mapper.Builder annotations; - + // Format is markdown-like syntax for URLs eg: // "New mayor is [John Smith](type=person&value=John%20Smith) " - static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); - + static Pattern markdownPattern = Pattern.compile("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)"); + public static AnnotatedText parse (String textPlusMarkup) { List annotations =new ArrayList<>(); - Matcher m = markdownPattern.matcher(textPlusMarkup); + Matcher m = markdownPattern.matcher(textPlusMarkup); int lastPos = 0; StringBuilder sb = new StringBuilder(); while(m.find()){ if(m.start() > lastPos){ sb.append(textPlusMarkup.substring(lastPos, m.start())); } - + int startOffset = sb.length(); int endOffset = sb.length() + m.group(1).length(); sb.append(m.group(1)); lastPos = m.end(); - + String[] pairs = m.group(2).split("&"); String value = null; for (String pair : pairs) { String[] kv = pair.split("="); try { - if(kv.length == 2){ + if(kv.length == 2){ throw new ElasticsearchParseException("key=value pairs are not supported in annotations"); } if(kv.length == 1) { @@ -230,9 +228,9 @@ public static AnnotatedText parse (String textPlusMarkup) { } } catch (UnsupportedEncodingException uee){ throw new ElasticsearchParseException("Unsupported encoding parsing annotated text", uee); - } - } - } + } + } + } if(lastPos < textPlusMarkup.length()){ sb.append(textPlusMarkup.substring(lastPos)); } @@ 
-242,13 +240,13 @@ public static AnnotatedText parse (String textPlusMarkup) { protected AnnotatedText(String textMinusMarkup, String textPlusMarkup, List annotations) { this.textMinusMarkup = textMinusMarkup; this.textPlusMarkup = textPlusMarkup; - this.annotations = annotations; + this.annotations = annotations; } - + public static final class AnnotationToken { public final int offset; public final int endOffset; - + public final String value; public AnnotationToken(int offset, int endOffset, String value) { this.offset = offset; @@ -259,12 +257,12 @@ public AnnotationToken(int offset, int endOffset, String value) { public String toString() { return value +" ("+offset+" - "+endOffset+")"; } - + public boolean intersects(int start, int end) { return (start <= offset && end >= offset) || (start <= endOffset && end >= endOffset) || (start >= offset && end <= endOffset); } - + @Override public int hashCode() { final int prime = 31; @@ -274,7 +272,7 @@ public int hashCode() { result = prime * result + Objects.hashCode(value); return result; } - + @Override public boolean equals(Object obj) { if (this == obj) @@ -287,16 +285,16 @@ public boolean equals(Object obj) { return Objects.equals(endOffset, other.endOffset) && Objects.equals(offset, other.offset) && Objects.equals(value, other.value); } - + } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(textMinusMarkup); sb.append("\n"); annotations.forEach(a -> { - sb.append(a); + sb.append(a); sb.append("\n"); }); return sb.toString(); @@ -308,10 +306,10 @@ public int numAnnotations() { public AnnotationToken getAnnotation(int index) { return annotations.get(index); - } + } } - - // A utility class for use with highlighters where the content being highlighted + + // A utility class for use with highlighters where the content being highlighted // needs plain text format for highlighting but marked-up format for token discovery. // The class takes markedup format field values and returns plain text versions. 
// When asked to tokenize plain-text versions by the highlighter it tokenizes the @@ -330,7 +328,7 @@ public void init(String[] markedUpFieldValues) { annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]); } } - + public String [] getPlainTextValuesForHighlighter(){ String [] result = new String[annotations.length]; for (int i = 0; i < annotations.length; i++) { @@ -338,127 +336,75 @@ public void init(String[] markedUpFieldValues) { } return result; } - + public AnnotationToken[] getIntersectingAnnotations(int start, int end) { List intersectingAnnotations = new ArrayList<>(); int fieldValueOffset =0; for (AnnotatedText fieldValueAnnotations : this.annotations) { //This is called from a highlighter where all of the field values are concatenated - // so each annotation offset will need to be adjusted so that it takes into account + // so each annotation offset will need to be adjusted so that it takes into account // the previous values AND the MULTIVAL delimiter for (AnnotationToken token : fieldValueAnnotations.annotations) { if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) { - intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, + intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset, token.endOffset + fieldValueOffset, token.value)); } - } + } //add 1 for the fieldvalue separator character fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1; } return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); - } - + } + @Override public Analyzer getWrappedAnalyzer(String fieldName) { return delegate; - } - + } + @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - if(components instanceof AnnotatedHighlighterTokenStreamComponents){ - // already wrapped. - return components; - } AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); - return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, this.annotations); - } - } - private static final class AnnotatedHighlighterTokenStreamComponents extends TokenStreamComponents{ - - private AnnotationsInjector annotationsInjector; - private AnnotatedText[] annotations; - int readerNum = 0; - - AnnotatedHighlighterTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsFilter, - AnnotatedText[] annotations) { - super(source, annotationsFilter); - this.annotationsInjector = annotationsFilter; - this.annotations = annotations; + AtomicInteger readerNum = new AtomicInteger(0); + return new TokenStreamComponents(r -> { + String plainText = readToString(r); + AnnotatedText at = this.annotations[readerNum.getAndIncrement()]; + assert at.textMinusMarkup.equals(plainText); + injector.setAnnotations(at); + components.getSource().accept(new StringReader(at.textMinusMarkup)); + }, injector); } + } - @Override - protected void setReader(Reader reader) { - String plainText = readToString(reader); - AnnotatedText at = this.annotations[readerNum++]; - assert at.textMinusMarkup.equals(plainText); - // This code is reliant on the behaviour of highlighter logic - it - // takes plain text multi-value fields and then calls the same analyzer - // for each field value in turn. 
This class has cached the annotations - // associated with each plain-text value and are arranged in the same order - annotationsInjector.setAnnotations(at); - super.setReader(new StringReader(at.textMinusMarkup)); - } - - } - - public static final class AnnotationAnalyzerWrapper extends AnalyzerWrapper { - private final Analyzer delegate; - public AnnotationAnalyzerWrapper (Analyzer delegate) { + public AnnotationAnalyzerWrapper(Analyzer delegate) { super(delegate.getReuseStrategy()); this.delegate = delegate; } - /** - * Wraps {@link StandardAnalyzer}. - */ - public AnnotationAnalyzerWrapper() { - this(new StandardAnalyzer()); - } - - @Override public Analyzer getWrappedAnalyzer(String fieldName) { return delegate; - } + } @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - if(components instanceof AnnotatedTokenStreamComponents){ - // already wrapped. + if (components.getTokenStream() instanceof AnnotationsInjector) { + // already wrapped return components; } AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream()); - return new AnnotatedTokenStreamComponents(components.getTokenizer(), injector); - } - } - - - //This Analyzer is not "wrappable" because of a limitation in Lucene https://issues.apache.org/jira/browse/LUCENE-8352 - private static final class AnnotatedTokenStreamComponents extends TokenStreamComponents{ - private AnnotationsInjector annotationsInjector; - - AnnotatedTokenStreamComponents(Tokenizer source, AnnotationsInjector annotationsInjector) { - super(source, annotationsInjector); - this.annotationsInjector = annotationsInjector; - } - - @Override - protected void setReader(Reader reader) { - // Sneaky code to change the content downstream components will parse. - // Replace the marked-up content Reader with a plain text Reader and prime the - // annotations injector with the AnnotatedTokens that need to be injected - // as plain-text parsing progresses. 
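Both analyzer wrappers in this hunk move to the same idiom: instead of subclassing TokenStreamComponents and overriding setReader(), the approach the removed code above relied on and which LUCENE-8352 (cited later in this file) made a dead end for wrapping, they hand a Consumer<Reader> to Lucene's TokenStreamComponents constructor so the source reader can be rewritten before the tokenizer ever sees it. A minimal sketch of just that mechanic, assuming lucene-core 8.x on the classpath; stripMarkup() is a hypothetical stand-in for AnnotatedText.parse(...).textMinusMarkup, and no injector filter is wired in here:

```java
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.UncheckedIOException;

import org.apache.lucene.analysis.Analyzer.TokenStreamComponents;

// Sketch of the consumer-based wrapping the + lines adopt: the new
// TokenStreamComponents delegates to the original source consumer, but only
// after swapping the marked-up Reader for a plain-text one.
final class ReaderRewritingSketch {

    static TokenStreamComponents wrap(TokenStreamComponents components) {
        return new TokenStreamComponents(reader -> {
            // stripMarkup() stands in for AnnotatedText.parse(...).textMinusMarkup
            String plain = stripMarkup(readToString(reader));
            components.getSource().accept(new StringReader(plain));
        }, components.getTokenStream());
    }

    private static String stripMarkup(String markedUp) {
        // Same pattern as markdownPattern: keep the visible text, drop the payload.
        return markedUp.replaceAll("\\[([^\\]\\[]*)\\]\\(([^\\)\\(]*)\\)", "$1");
    }

    private static String readToString(Reader reader) {
        char[] buf = new char[1024];
        StringBuilder sb = new StringBuilder();
        try (Reader r = reader) {
            int n;
            while ((n = r.read(buf)) != -1) {
                sb.append(buf, 0, n);
            }
            return sb.toString();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}
```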
- AnnotatedText annotations = AnnotatedText.parse(readToString(reader)); - annotationsInjector.setAnnotations(annotations); - super.setReader(new StringReader(annotations.textMinusMarkup)); + return new TokenStreamComponents(r -> { + AnnotatedText annotations = AnnotatedText.parse(readToString(r)); + injector.setAnnotations(annotations); + components.getSource().accept(new StringReader(annotations.textMinusMarkup)); + }, injector); } } - - static String readToString(Reader reader) { + + static String readToString(Reader reader) { char[] arr = new char[8 * 1024]; StringBuilder buffer = new StringBuilder(); int numCharsRead; @@ -467,15 +413,15 @@ static String readToString(Reader reader) { buffer.append(arr, 0, numCharsRead); } reader.close(); - return buffer.toString(); + return buffer.toString(); } catch (IOException e) { throw new UncheckedIOException("IO Error reading field content", e); } - } + } + - public static final class AnnotationsInjector extends TokenFilter { - + private AnnotatedText annotatedText; AnnotatedText.AnnotationToken nextAnnotationForInjection = null; private int currentAnnotationIndex = 0; @@ -502,8 +448,8 @@ public void setAnnotations(AnnotatedText annotatedText) { nextAnnotationForInjection = null; } } - - + + @Override public void reset() throws IOException { @@ -512,7 +458,7 @@ public void reset() throws IOException { inputExhausted = false; super.reset(); } - + // Abstracts if we are pulling from some pre-cached buffer of // text tokens or directly from the wrapped TokenStream private boolean internalNextToken() throws IOException{ @@ -524,7 +470,7 @@ private boolean internalNextToken() throws IOException{ pendingStates.clear(); } return true; - } + } if(inputExhausted) { return false; } @@ -579,28 +525,28 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th posLenAtt.setPositionLength(annotationPosLen); textOffsetAtt.setOffset(nextAnnotationForInjection.offset, nextAnnotationForInjection.endOffset); setType(nextAnnotationForInjection); - + // We may have multiple annotations at this location - stack them up final int annotationOffset = nextAnnotationForInjection.offset; final AnnotatedText.AnnotationToken firstAnnotationAtThisPos = nextAnnotationForInjection; while (nextAnnotationForInjection != null && nextAnnotationForInjection.offset == annotationOffset) { - + setType(nextAnnotationForInjection); termAtt.resizeBuffer(nextAnnotationForInjection.value.length()); termAtt.copyBuffer(nextAnnotationForInjection.value.toCharArray(), 0, nextAnnotationForInjection.value.length()); - + if (nextAnnotationForInjection == firstAnnotationAtThisPos) { posAtt.setPositionIncrement(firstSpannedTextPosInc); //Put at the head of the queue of tokens to be emitted - pendingStates.add(0, captureState()); + pendingStates.add(0, captureState()); } else { - posAtt.setPositionIncrement(0); + posAtt.setPositionIncrement(0); //Put after the head of the queue of tokens to be emitted - pendingStates.add(1, captureState()); + pendingStates.add(1, captureState()); } - - + + // Flag the inject annotation as null to prevent re-injection. 
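The stacking behaviour in emitAnnotation() above comes down to two Lucene primitives: captureState()/restoreState() to queue token copies (the pendingStates list), and a position increment of 0 to layer a token on top of the previous one. A reduced, self-contained filter showing only that mechanic; the duplicate-a-lowercase-copy policy is invented for illustration, where the injector queues annotation values instead:

```java
import java.io.IOException;
import java.util.Locale;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.util.AttributeSource;

// Emits every token, then a lowercased copy stacked at the same position.
final class StackingCopyFilter extends TokenFilter {
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);
    private AttributeSource.State pending; // plays the role of the injector's pendingStates queue

    StackingCopyFilter(TokenStream in) {
        super(in);
    }

    @Override
    public boolean incrementToken() throws IOException {
        if (pending != null) {
            restoreState(pending);          // replay the captured token...
            pending = null;
            posAtt.setPositionIncrement(0); // ...at the same position as the original
            String lower = termAtt.toString().toLowerCase(Locale.ROOT);
            termAtt.setEmpty().append(lower);
            return true;
        }
        if (input.incrementToken() == false) {
            return false;
        }
        pending = captureState();           // queue a copy to stack on the next call
        return true;
    }

    @Override
    public void reset() throws IOException {
        pending = null;
        super.reset();
    }
}
```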
currentAnnotationIndex++; if (currentAnnotationIndex < annotatedText.numAnnotations()) { @@ -614,7 +560,7 @@ private void emitAnnotation(int firstSpannedTextPosInc, int annotationPosLen) th } } - + public static final class AnnotatedTextFieldType extends StringFieldType { @@ -625,7 +571,7 @@ public AnnotatedTextFieldType() { protected AnnotatedTextFieldType(AnnotatedTextFieldType ref) { super(ref); } - + @Override public void setIndexAnalyzer(NamedAnalyzer delegate) { if(delegate.analyzer() instanceof AnnotationAnalyzerWrapper){ @@ -655,7 +601,7 @@ public Query existsQuery(QueryShardContext context) { return new NormsFieldExistsQuery(name()); } } - + @Override public Query phraseQuery(String field, TokenStream stream, int slop, boolean enablePosIncrements) throws IOException { PhraseQuery.Builder builder = new PhraseQuery.Builder(); @@ -678,7 +624,7 @@ public Query phraseQuery(String field, TokenStream stream, int slop, boolean ena return builder.build(); } - + @Override public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { @@ -713,12 +659,12 @@ public Query multiPhraseQuery(String field, TokenStream stream, int slop, boolea mpqb.add(multiTerms.toArray(new Term[0])); } return mpqb.build(); - } + } } - + private int positionIncrementGap; protected AnnotatedTextFieldMapper(String simpleName, AnnotatedTextFieldType fieldType, MappedFieldType defaultFieldType, - int positionIncrementGap, + int positionIncrementGap, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); assert fieldType.tokenized(); @@ -774,6 +720,6 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, if (includeDefaults || positionIncrementGap != POSITION_INCREMENT_GAP_USE_ANALYZER) { builder.field("position_increment_gap", positionIncrementGap); - } + } } } diff --git a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java index 2fcf917ab1d79..ca29521802fe2 100644 --- a/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java +++ b/plugins/mapper-annotated-text/src/test/java/org/elasticsearch/search/highlight/AnnotatedTextHighlighterTests.java @@ -57,7 +57,7 @@ import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR; import static org.hamcrest.CoreMatchers.equalTo; -public class AnnotatedTextHighlighterTests extends ESTestCase { +public class AnnotatedTextHighlighterTests extends ESTestCase { private void assertHighlightOneDoc(String fieldName, String []markedUpInputs, Query query, Locale locale, BreakIterator breakIterator, diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 6566063d220d3..dc14373026430 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -63,7 +63,7 @@ public void testSizeEnabled() throws Exception { boolean points = false; for (IndexableField field : doc.rootDoc().getFields("_size")) { stored |= field.fieldType().stored(); - points |= 
field.fieldType().pointDimensionCount() > 0; + points |= field.fieldType().pointIndexDimensionCount() > 0; } assertTrue(stored); assertTrue(points); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 72b62a930aeee..c6e8335bd5a6d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -59,6 +59,7 @@ public List> getSettings() { AzureStorageSettings.KEY_SETTING, AzureStorageSettings.ENDPOINT_SUFFIX_SETTING, AzureStorageSettings.TIMEOUT_SETTING, + AzureStorageSettings.MAX_RETRIES_SETTING, AzureStorageSettings.PROXY_TYPE_SETTING, AzureStorageSettings.PROXY_HOST_SETTING, AzureStorageSettings.PROXY_PORT_SETTING diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index c4e4c1439e45f..1c90f97a43728 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -54,7 +54,7 @@ final class AzureStorageSettings { key -> SecureSetting.secureString(key, null)); /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). */ - private static final Setting MAX_RETRIES_SETTING = + public static final Setting MAX_RETRIES_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "max_retries", (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope), ACCOUNT_SETTING, KEY_SETTING); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 3b3793f22ba04..f7b49bd24adf6 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -25,9 +25,11 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -35,6 +37,7 @@ import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.Map; import static org.elasticsearch.repositories.azure.AzureStorageService.blobNameFromUri; @@ -60,10 +63,24 @@ public void testReadSecuredSettings() { assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix")); } + private AzureRepositoryPlugin pluginWithSettingsValidation(Settings settings) { + final AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings); + new SettingsModule(settings, plugin.getSettings(), 
Collections.emptyList(), Collections.emptySet()); + return plugin; + } + + private AzureStorageService storageServiceWithSettingsValidation(Settings settings) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + return plugin.azureStoreService; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + public void testCreateClientWithEndpointSuffix() throws IOException { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); @@ -85,7 +102,7 @@ public void testReinitClientSettings() throws IOException { secureSettings2.setString("azure.client.azure3.account", "myaccount23"); secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); @@ -117,7 +134,7 @@ public void testReinitClientEmptySettings() throws IOException { secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -141,7 +158,7 @@ public void testReinitClientWrongSettings() throws IOException { secureSettings2.setString("azure.client.azure1.account", "myaccount1"); // missing key final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -154,7 +171,7 @@ public void testReinitClientWrongSettings() throws IOException { } public void testGetSelectedClientNonExisting() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final SettingsException e = expectThrows(SettingsException.class, () -> 
azureStorageService.client("azure4")); assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } @@ -164,7 +181,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); @@ -172,13 +189,13 @@ public void testGetSelectedClientDefaultTimeout() { } public void testGetSelectedClientNoTimeout() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -190,7 +207,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -200,7 +217,7 @@ public void testNoProxy() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -213,7 +230,7 @@ public void testProxyHttp() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -233,7 +250,7 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - final AzureStorageService mock = new 
AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -252,7 +269,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -267,7 +284,7 @@ public void testProxyNoHost() { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -278,7 +295,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -289,7 +306,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -301,7 +318,7 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 510c101379d2f..d35a248f5a5e5 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -26,7 +26,7 @@ dependencies { compile 'com.google.cloud:google-cloud-storage:1.40.0' compile 'com.google.cloud:google-cloud-core:1.40.0' compile 'com.google.guava:guava:20.0' - compile 'joda-time:joda-time:2.10' + compile "joda-time:joda-time:${versions.joda}" compile 'com.google.http-client:google-http-client:1.24.1' compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 5c57c9208c536..3e6c7a1318df3 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -49,7 +49,7 @@ dependencies { compile 
'com.fasterxml.jackson.core:jackson-databind:2.6.7.1' compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0' compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - compile 'joda-time:joda-time:2.10' + compile "joda-time:joda-time:${versions.joda}" // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, // and whitelist this hack in JarHell diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 95313f906563d..837f2ea34c752 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -107,7 +107,8 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { // // We do this because directly constructing the client is deprecated (was already deprecated in 1.1.223 too) // so this change removes that usage of a deprecated API. - builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null)); + builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null)) + .enablePathStyleAccess(); return builder.build(); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTcpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTcpChannel.java index 947a255b178c8..480043acbd899 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTcpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTcpChannel.java @@ -58,6 +58,11 @@ public void addCloseListener(ActionListener listener) { addCloseListener(ActionListener.toBiConsumer(listener)); } + @Override + public void addConnectListener(ActionListener listener) { + addConnectListener(ActionListener.toBiConsumer(listener)); + } + @Override public void close() { getContext().closeChannel(); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 15f7d1e28943f..ab1e1411c3b81 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -20,7 +20,7 @@ package org.elasticsearch.transport.nio; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -66,10 +66,10 @@ public class NioTransport extends TcpTransport { private volatile NioGroup nioGroup; private volatile TcpChannelFactory clientChannelFactory; - protected NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, - PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, - CircuitBreakerService circuitBreakerService) { - super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); + protected NioTransport(Settings settings, Version version, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, + PageCacheRecycler 
pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService) { + super("nio", settings, version, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService); this.pageCacheRecycler = pageCacheRecycler; } @@ -80,10 +80,9 @@ protected NioTcpServerChannel bind(String name, InetSocketAddress address) throw } @Override - protected NioTcpChannel initiateChannel(DiscoveryNode node, ActionListener connectListener) throws IOException { + protected NioTcpChannel initiateChannel(DiscoveryNode node) throws IOException { InetSocketAddress address = node.getAddress().address(); NioTcpChannel channel = nioGroup.openChannel(address, clientChannelFactory); - channel.addConnectListener(ActionListener.toBiConsumer(connectListener)); return channel; } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 1da8e909b2dd8..fd57ea20b1c8d 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport.nio; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -61,8 +62,8 @@ public Map> getTransports(Settings settings, ThreadP NamedWriteableRegistry namedWriteableRegistry, NetworkService networkService) { return Collections.singletonMap(NIO_TRANSPORT_NAME, - () -> new NioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, - circuitBreakerService)); + () -> new NioTransport(settings, Version.CURRENT, threadPool, networkService, bigArrays, pageCacheRecycler, + namedWriteableRegistry, circuitBreakerService)); } @Override diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java index df53a4d79c7ad..0c1bad79ee8e6 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/NioTransportIT.java @@ -104,7 +104,8 @@ public Map> getTransports(Settings settings, ThreadP ExceptionThrowingNioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { - super(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, circuitBreakerService); + super(settings, Version.CURRENT, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry, + circuitBreakerService); } @Override diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java index 33d40b9f735fa..70acc2d148241 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/transport/nio/SimpleNioTransportTests.java @@ -20,13 +20,13 @@ 
package org.elasticsearch.transport.nio; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -36,6 +36,7 @@ import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.ConnectionProfile; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.Transport; @@ -57,24 +58,18 @@ public static MockTransportService nioFromThreadPool(Settings settings, ThreadPo ClusterSettings clusterSettings, boolean doHandshake) { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); NetworkService networkService = new NetworkService(Collections.emptyList()); - Transport transport = new NioTransport(settings, threadPool, - networkService, BigArrays.NON_RECYCLING_INSTANCE, new MockPageCacheRecycler(settings), namedWriteableRegistry, - new NoneCircuitBreakerService()) { + Transport transport = new NioTransport(settings, version, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, + new MockPageCacheRecycler(settings), namedWriteableRegistry, new NoneCircuitBreakerService()) { @Override - public Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException, - InterruptedException { + public void executeHandshake(DiscoveryNode node, TcpChannel channel, ConnectionProfile profile, + ActionListener listener) { if (doHandshake) { - return super.executeHandshake(node, channel, timeout); + super.executeHandshake(node, channel, profile, listener); } else { - return version.minimumCompatibilityVersion(); + listener.onResponse(version.minimumCompatibilityVersion()); } } - - @Override - protected Version getCurrentVersion() { - return version; - } }; MockTransportService mockTransportService = MockTransportService.createNewService(settings, transport, version, threadPool, clusterSettings, Collections.emptySet()); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index bebdb320db4a1..8438c002c2a4e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -27,7 +27,6 @@ import org.apache.logging.log4j.core.appender.ConsoleAppender; import org.apache.logging.log4j.core.appender.CountingNoOpAppender; import org.apache.logging.log4j.core.config.Configurator; -import org.apache.logging.log4j.spi.ExtendedLogger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; import org.elasticsearch.cli.UserException; @@ -301,7 +300,7 @@ public void testPrefixLogger() throws IOException, IllegalAccessException, UserE 
setupLogging("prefix"); final String prefix = randomAlphaOfLength(16); - final Logger logger = new PrefixLogger((ExtendedLogger) LogManager.getLogger("prefix_test"), "prefix_test", prefix); + final Logger logger = new PrefixLogger(LogManager.getLogger("prefix_test"), prefix); logger.info("test"); logger.info("{}", "test"); final Exception e = new Exception("exception"); @@ -332,7 +331,7 @@ public void testPrefixLoggerMarkersCanBeCollected() throws IOException, UserExce final int prefixes = 1 << 19; // to ensure enough markers that the GC should collect some when we force a GC below for (int i = 0; i < prefixes; i++) { // this has the side effect of caching a marker with this prefix - new PrefixLogger((ExtendedLogger) LogManager.getLogger("prefix" + i), "prefix" + i, "prefix" + i); + new PrefixLogger(LogManager.getLogger("logger" + i), "prefix" + i); } System.gc(); // this will free the weakly referenced keys in the marker cache diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java new file mode 100644 index 0000000000000..429019b3a134b --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.os; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.equalTo; + +public class EvilOsProbeTests extends ESTestCase { + + public void testOsPrettyName() throws IOException { + final OsInfo osInfo = OsProbe.getInstance().osInfo(randomLongBetween(1, 100), randomIntBetween(1, 8)); + if (Constants.LINUX) { + final List lines = Files.readAllLines(PathUtils.get("/etc/os-release")); + for (final String line : lines) { + if (line != null && line.startsWith("PRETTY_NAME=")) { + final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(line); + assert matcher.matches() : line; + final String prettyName = matcher.group(2); + assertThat(osInfo.getPrettyName(), equalTo(prettyName)); + return; + } + } + assertThat(osInfo.getPrettyName(), equalTo("Linux")); + } else { + assertThat(osInfo.getPrettyName(), equalTo(Constants.OS_NAME)); + } + } + +} diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 4e27511fe04fb..a75b906e12324 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -56,7 +56,6 @@ for (Version version : bwcVersions.wireCompatible) { mustRunAfter(precommit) } - Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { bwcVersion = version numBwcNodes = 3 @@ -73,12 +72,12 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.suite', 'old_cluster' } - Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> configure(extensions.findByName("${baseName}#${name}")) { dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> unicastSeed() } - minimumMasterNodes = { 3 } + otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } + minimumMasterNodes = { 2 } /* Override the data directory so the new node always gets the node we * just stopped's data directory. 
*/ dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } @@ -91,7 +90,7 @@ for (Version version : bwcVersions.wireCompatible) { configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, // Use all running nodes as seed nodes so there is no race between pinging and the tests - { oldClusterTest.nodes.get(1).transportUri() + ',' + oldClusterTest.nodes.get(2).transportUri() }) + { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { @@ -104,7 +103,7 @@ for (Version version : bwcVersions.wireCompatible) { configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, // Use all running nodes as seed nodes so there is no race between pinging and the tests - { oldClusterTest.nodes.get(2).transportUri() + ',' + oneThirdUpgradedTest.nodes.get(0).transportUri() }) + { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] }) Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") twoThirdsUpgradedTestRunner.configure { @@ -117,7 +116,7 @@ for (Version version : bwcVersions.wireCompatible) { configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, // Use all running nodes as seed nodes so there is no race between pinging and the tests - { oneThirdUpgradedTest.nodes.get(0).transportUri() + ',' + twoThirdsUpgradedTest.nodes.get(0).transportUri() }) + { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 0b186db0f7a9f..eab1136ed1ed0 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -70,7 +70,7 @@ public void testIndexing() throws IOException { Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); assertThat("this branch is not needed if we aren't compatible with 6.0", minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); - if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0_alpha1)) { + if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0)) { XContentBuilder template = jsonBuilder(); template.startObject(); { diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index f5a9f25df16f3..e54e913aea171 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -516,7 +516,7 @@ wait_for_elasticsearch_status() { # $1 - expected version check_elasticsearch_version() { local version=$1 - local versionToCheck=$(echo $version | sed -e 's/-SNAPSHOT//') + local versionToCheck=$(echo $version | sed -e 's/-SNAPSHOT//' | sed -e 's/-\(alpha\|beta\|rc\)[0-9]//') run curl -s localhost:9200 [ "$status" -eq 0 ] diff --git a/qa/wildfly/build.gradle b/qa/wildfly/build.gradle index 6c6bf5d9b31b9..c967d4139d48a 100644 --- a/qa/wildfly/build.gradle +++ b/qa/wildfly/build.gradle @@ -129,7 +129,7 @@ task 
startWildfly { assert index >= 0 httpPort = Integer.parseInt(line.substring(index + 1)) // set this system property so the test runner knows the port Wildfly is listening for HTTP requests on - integTestRunner.systemProperty("tests.jboss.http.port", httpPort) + integTestRunner.systemProperty("tests.jboss.root", "http://localhost:$httpPort/wildfly-$version/transport") } else if (line.matches('.*Http management interface listening on http://.*:\\d+/management$')) { assert managementPort == 0 final int colonIndex = line.lastIndexOf(":") diff --git a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java index 1da713ea9bcd9..9aebffdc4ce3f 100644 --- a/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/elasticsearch/wildfly/WildflyIT.java @@ -27,10 +27,9 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; +import org.apache.log4j.Logger; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestRuleLimitSysouts; -import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.DeprecationHandler; @@ -54,14 +53,16 @@ @TestRuleLimitSysouts.Limit(bytes = 14000) public class WildflyIT extends LuceneTestCase { + Logger logger = Logger.getLogger(WildflyIT.class); + public void testTransportClient() throws URISyntaxException, IOException { try (CloseableHttpClient client = HttpClientBuilder.create().build()) { final String str = String.format( - Locale.ROOT, - "http://localhost:%d/wildfly-%s%s/transport/employees/1", - Integer.parseInt(System.getProperty("tests.jboss.http.port")), - Version.CURRENT, - Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : ""); + Locale.ROOT, + "%s/employees/1", + System.getProperty("tests.jboss.root") + ); + logger.info("Connecting to uri: " + str); final HttpPut put = new HttpPut(new URI(str)); final String body; try (XContentBuilder builder = jsonBuilder()) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json index 96c88ae933840..a07f362bef064 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/bulk.json @@ -16,10 +16,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the bulk operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json index b146c34b441ea..6554fa659ec67 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete.json @@ -22,10 +22,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index 65b0261985656..f97caede15a30 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -22,10 +22,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "stored_fields": { "type": "list", "description" : "A comma-separated list of stored fields to return in the response" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index 3e07ff7acfa37..574206a0dc3ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -21,10 +21,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "wait_for_active_shards": { "type" : "string", "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. 
Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index a1fdf7dbd83c6..5a576e8ce3c38 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -16,10 +16,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "analyzer": { "type" : "string", "description" : "The analyzer to use for the query string" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index f1294e57cd30c..2d13b49e3cbbc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -22,10 +22,6 @@ } }, "params": { - "include_type_name": { - "type" : "string", - "description" : "Whether to add the type name to the response" - }, "wait_for_active_shards": { "type": "string", "description": "Sets the number of shard copies that must be active before proceeding with the update operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml index 536cb28d5484b..e8593c4c8d5ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/10_basic.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: true body: - index: @@ -32,11 +31,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: true body: - index: @@ -69,14 +67,13 @@ - skip: version: " - 6.99.99" features: headers - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: catch: /Malformed action\/metadata line \[3\], expected FIELD_NAME but found \[END_OBJECT\]/ headers: Content-Type: application/json bulk: - include_type_name: false body: | {"index": {"_index": "test_index", "_id": "test_id"}} {"f1": "v1", "f2": 42} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml index 742cf49c38e53..b23517f6a8f25 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/20_list_of_strings.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: true body: - '{"index": {"_index": "test_index", "_id": "test_id"}}' diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml index 15a70fa3f378e..38706d133e44b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/30_big_string.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: true body: | {"index": {"_index": "test_index", "_id": "test_id"}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml index 3811eb8a18cc4..5e783d60d3d46 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/40_source.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false refresh: true index: test_index id: test_id_1 @@ -15,7 +13,6 @@ - do: index: - include_type_name: false refresh: true index: test_index id: test_id_2 @@ -23,7 +20,6 @@ - do: index: - include_type_name: false refresh: true index: test_index id: test_id_3 @@ -32,7 +28,6 @@ - do: bulk: - include_type_name: false refresh: true body: | { "update": { "_index": "test_index", "_id": "test_id_1", "_source": true } } @@ -45,7 +40,6 @@ - do: bulk: - include_type_name: false index: test_index _source: true body: | @@ -56,7 +50,6 @@ - do: bulk: - include_type_name: false refresh: true body: | { "update": { "_index": "test_index", "_id": "test_id_1", "_source": {"includes": "bar"} } } @@ -71,7 +64,6 @@ - do: bulk: - include_type_name: false index: test_index _source_includes: foo body: | diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml index 059794873add7..77098779c0c4f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/50_refresh.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: true body: | {"index": {"_index": "bulk_50_refresh_1", "_id": "bulk_50_refresh_id1"}} @@ -25,11 +23,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: "" body: | {"index": {"_index": "bulk_50_refresh_2", "_id": "bulk_50_refresh_id3"}} @@ -48,11 +44,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: bulk: - include_type_name: false refresh: wait_for body: | {"index": {"_index": "bulk_50_refresh_3", "_id": "bulk_50_refresh_id5"}} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml index 7f5d13125c3d4..842d749d7b14d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/10_basic.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was 
introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -16,16 +15,7 @@ - do: delete: - include_type_name: false index: test_1 id: 1 - match: { _version: 2 } - - - do: - catch: /illegal_argument_exception/ - delete: - include_type_name: false - index: index - type: type - id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml index c235b8ebfbf23..e3f210966563e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/11_shard_header.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -20,19 +20,17 @@ - do: index: - include_type_name: false index: foobar id: 1 body: { foo: bar } - do: delete: - include_type_name: false index: foobar id: 1 - match: { _index: foobar } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 2} - match: { _shards.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml index f6d4fffb68af8..13356cd938c48 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/12_result.yml @@ -3,18 +3,16 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } - do: delete: - include_type_name: false index: test_1 id: 1 @@ -23,7 +21,6 @@ - do: catch: missing delete: - include_type_name: false index: test_1 id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_internal_version.yml index c21617bcac621..afe69b4fe82e5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_internal_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/20_internal_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -17,14 +16,12 @@ - do: catch: conflict delete: - include_type_name: false index: test_1 id: 1 version: 2 - do: delete: - include_type_name: false index: test_1 id: 1 version: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml index c2cae2b6e1e5d..d7cc4fce0eda5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/25_external_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -19,7 +18,6 @@ - do: catch: conflict delete: - include_type_name: false index: test_1 id: 1 version_type: 
external @@ -27,7 +25,6 @@ - do: delete: - include_type_name: false index: test_1 id: 1 version_type: external diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml index df119a57c12ce..ebe1680551c96 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/26_external_gte_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -19,7 +18,6 @@ - do: catch: conflict delete: - include_type_name: false index: test_1 id: 1 version_type: external_gte @@ -27,7 +25,6 @@ - do: delete: - include_type_name: false index: test_1 id: 1 version_type: external_gte @@ -37,7 +34,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -48,7 +44,6 @@ - do: delete: - include_type_name: false index: test_1 id: 1 version_type: external_gte diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml index a0ad089b0fbe8..f1647b8edac85 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/30_routing.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -14,7 +14,6 @@ number_of_shards: 5 - do: index: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -23,14 +22,12 @@ - do: catch: missing delete: - include_type_name: false index: test_1 id: 1 routing: 4 - do: delete: - include_type_name: false index: test_1 id: 1 routing: 5 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml index 326186bf07bf3..121959d2d976b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -21,7 +21,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -32,7 +31,6 @@ # them to be different for this test to pass - do: index: - include_type_name: false index: test_1 id: 3 body: { foo: bar } @@ -41,7 +39,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { terms: { _id: [1,3] }} @@ -50,13 +47,11 @@ - do: delete: - include_type_name: false index: test_1 id: 1 - do: search: - include_type_name: false index: test_1 body: query: { terms: { _id: [1,3] }} @@ -65,7 +60,6 @@ - do: delete: - include_type_name: false index: test_1 id: 3 refresh: true @@ -76,7 +70,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { terms: { _id: [1,3] }} @@ -88,11 +81,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -101,7 +93,6 @@ - do: search: 
- include_type_name: false index: test_1 body: query: { term: { _id: 1 }} @@ -109,14 +100,12 @@ - do: delete: - include_type_name: false index: test_1 id: 1 refresh: "" - do: search: - include_type_name: false index: test_1 body: query: { term: { _id: 1 }} @@ -127,11 +116,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: delete_50_refresh_1 id: delete_50_refresh_id1 body: { foo: bar } @@ -140,7 +128,6 @@ - do: search: - include_type_name: false index: delete_50_refresh_1 body: query: { term: { _id: delete_50_refresh_id1 }} @@ -148,7 +135,6 @@ - do: delete: - include_type_name: false index: delete_50_refresh_1 id: delete_50_refresh_id1 refresh: wait_for @@ -156,7 +142,6 @@ - do: search: - include_type_name: false index: delete_50_refresh_1 body: query: { term: { _id: delete_50_refresh_id1 }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml index 46b238482d76b..b8f81080f3ee8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/60_missing.yml @@ -3,12 +3,11 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: catch: missing delete: - include_type_name: false index: test_1 id: 1 @@ -17,11 +16,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: delete: - include_type_name: false index: test_1 id: 1 ignore: 404 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml index 71403f0b56f78..9183c70c29bce 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/10_basic.yml @@ -3,30 +3,20 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 中文 body: { "foo": "Hello: 中文" } - do: get: - include_type_name: false index: test_1 id: 中文 - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } - - - do: - catch: /illegal_argument_exception/ - get: - index: index - type: type - id: 1 - include_type_name: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml index fbab99fc3c6ed..67065270665cf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/15_default_values.yml @@ -3,23 +3,20 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { "foo": "bar" } - do: get: - include_type_name: false index: test_1 id: 1 - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: '1' } - match: { _source: { foo: "bar" } } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml index 20971728ffd96..7dd782652bf99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/20_stored_fields.yml @@ -3,8 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -21,26 +20,23 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { "foo": "bar", "count": 1 } - do: get: - include_type_name: false index: test_1 id: 1 stored_fields: foo - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: '1' } - match: { fields.foo: [bar] } - is_false: _source - do: get: - include_type_name: false index: test_1 id: 1 stored_fields: [foo, count] @@ -51,7 +47,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 stored_fields: [foo, count, _source] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml index 941623142259b..61b4fc8a1597a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/40_routing.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -22,7 +22,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -30,7 +29,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -42,7 +40,6 @@ - do: catch: missing get: - include_type_name: false index: test_1 id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml index bd26eee1b5dd2..38130cee59810 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/50_with_headers.yml @@ -3,11 +3,9 @@ - skip: features: ["headers", "yaml"] version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { "body": "foo" } @@ -16,12 +14,11 @@ headers: Accept: application/yaml get: - include_type_name: false index: test_1 id: 1 - match: {_index: "test_1"} - - is_false: "_type" + - match: { _type: _doc } - match: {_id: "1"} - match: {_version: 1} - match: {found: true} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml index c5955bf4d7a70..a1647835536e1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/60_realtime_refresh.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -21,7 +21,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -29,14 +28,12 @@ - do: catch: missing get: - include_type_name: false index: test_1 id: 1 
realtime: false - do: get: - include_type_name: false index: test_1 id: 1 realtime: true @@ -45,7 +42,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 realtime: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml index f9247d1076159..4090636f1c21f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml @@ -3,8 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -18,40 +17,39 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { "include": { "field1": "v1", "field2": "v2" }, "count": 1 } - do: - get: { include_type_name: false, index: test_1, id: 1, _source: false } + get: { index: test_1, id: 1, _source: false } - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1" } - is_false: _source - do: - get: { include_type_name: false, index: test_1, id: 1, _source: true } + get: { index: test_1, id: 1, _source: true } - match: { _source.include.field1: v1 } - do: - get: { include_type_name: false, index: test_1, id: 1, _source: include.field1 } + get: { index: test_1, id: 1, _source: include.field1 } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - do: - get: { include_type_name: false, index: test_1, id: 1, _source_includes: include.field1 } + get: { index: test_1, id: 1, _source_includes: include.field1 } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - do: - get: { include_type_name: false, index: test_1, id: 1, _source_includes: "include.field1,include.field2" } + get: { index: test_1, id: 1, _source_includes: "include.field1,include.field2" } - match: { _source.include.field1: v1 } - match: { _source.include.field2: v2 } - is_false: _source.count - do: - get: { include_type_name: false, index: test_1, id: 1, _source_includes: include, _source_excludes: "*.field2" } + get: { index: test_1, id: 1, _source_includes: include, _source_excludes: "*.field2" } - match: { _source.include.field1: v1 } - is_false: _source.include.field2 - is_false: _source.count @@ -59,14 +57,13 @@ - do: get: - include_type_name: false index: test_1 id: 1 stored_fields: count _source: true - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1" } - match: { fields.count: [1] } - match: { _source.include.field1: v1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml index 48a6966b455b0..d7d8edfc65dcb 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/80_missing.yml @@ -3,12 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: catch: missing get: - include_type_name: false index: test_1 id: 1 @@ -17,11 +15,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: get: - include_type_name: false index: test_1 id: 1 ignore: 404 diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml index 6975d4f5be518..9037a9113e937 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/90_versions.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -15,7 +14,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -23,7 +21,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 version: 2 @@ -32,14 +29,12 @@ - do: catch: conflict get: - include_type_name: false index: test_1 id: 1 version: 1 - do: get: - include_type_name: false index: test_1 id: 1 version: 2 @@ -49,7 +44,6 @@ - do: catch: conflict get: - include_type_name: false index: test_1 id: 1 version: 10 @@ -58,7 +52,6 @@ - do: catch: conflict get: - include_type_name: false index: test_1 id: 1 version: 1 @@ -66,7 +59,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 version: 2 @@ -76,7 +68,6 @@ - do: catch: conflict get: - include_type_name: false index: test_1 id: 1 version: 10 @@ -85,7 +76,6 @@ - do: catch: conflict get: - include_type_name: false index: test_1 id: 1 version: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml index 0deb76376945b..a129dcab80d9a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/10_with_id.yml @@ -3,28 +3,26 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test-weird-index-中文 id: 1 body: { foo: bar } - match: { _index: test-weird-index-中文 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 1} - do: get: - include_type_name: false index: test-weird-index-中文 id: 1 - match: { _index: test-weird-index-中文 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 1} - match: { _source: { foo: bar }} @@ -32,16 +30,6 @@ - do: catch: bad_request index: - include_type_name: false index: idx id: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa body: { foo: bar } - - - do: - catch: /illegal_argument_exception/ - index: - index: index - type: type - id: 1 - include_type_name: false - body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml index 7198c694b511a..f8a50415a95ef 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/12_result.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_index id: 1 body: { foo: bar } @@ -16,7 +14,6 @@ - do: index: - include_type_name: false index: test_index id: 1 body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml index 3bd607c66fac7..073a4704b4ef8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/15_without_id.yml @@ -3,36 +3,26 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 body: { foo: bar } - is_true: _id - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _version: 1 } - set: { _id: id } - do: get: - include_type_name: false index: test_1 id: '$id' - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: $id } - match: { _version: 1 } - match: { _source: { foo: bar }} - - - do: - catch: /illegal_argument_exception/ - index: - index: index - type: type - include_type_name: false - body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml index ddab362b80f9f..c33a86093acab 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/20_optype.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 op_type: create @@ -16,7 +15,6 @@ - do: catch: conflict index: - include_type_name: false index: test_1 id: 1 op_type: create @@ -24,7 +22,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 op_type: index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_internal_version.yml index 53351c24feb35..adc4f3f4b15c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_internal_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/30_internal_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -15,7 +14,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -24,14 +22,12 @@ - do: catch: conflict index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } version: 1 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml index 054f8cad15ddf..89aaa190af384 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/35_external_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -18,7 +17,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -30,7 +28,6 @@ - do: catch: conflict index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -40,7 +37,6 @@ - do: catch: conflict index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -49,7 +45,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml index 67f534db341df..82421227adb7f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/36_external_gte_version.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -18,7 +17,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -30,7 +28,6 @@ - do: catch: conflict index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } @@ -39,7 +36,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar2 } @@ -50,7 +46,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml index 523cf47f8582d..c3b577df4fe2c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/40_routing.yml @@ -3,8 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -22,7 +21,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -30,7 +28,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -42,7 +39,6 @@ - do: catch: missing get: - include_type_name: false index: test_1 id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml index 346338791d61c..ec52d0cae89a9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -16,14 +16,12 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: { foo: bar } - do: search: - include_type_name: false index: test_1 body: query: { term: { _id: 1 }} @@ -32,7 +30,6 @@ - do: index: - include_type_name: false index: test_1 id: 2 refresh: true @@ -41,7 +38,6 @@ - do: search: - 
include_type_name: false index: test_1 body: query: { term: { _id: 2 }} @@ -53,11 +49,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 refresh: "" @@ -66,7 +61,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { term: { _id: 1 }} @@ -78,11 +72,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: index_60_refresh_1 id: index_60_refresh_id1 body: { foo: bar } @@ -91,7 +84,6 @@ - do: search: - include_type_name: false index: index_60_refresh_1 body: query: { term: { _id: index_60_refresh_id1 }} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml index a96c31e9ce6ab..e58e2bd8aa1fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/10_basic.yml @@ -3,8 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -25,8 +24,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -46,8 +44,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -61,8 +58,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -80,8 +76,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml index 36f45dd0e6018..ccebfc7c9423b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_mapping/10_basic.yml @@ -2,8 +2,7 @@ setup: - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml index 7588c66188546..e4545320a973c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/10_basic.yml @@ -3,8 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -63,8 +62,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: 
types are required in requests before 7.0.0 - do: indices.create: include_type_name: false @@ -84,8 +82,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: index: index @@ -101,4 +98,3 @@ properties: bar: type: float - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml index c4133e8d01be5..c6212fbbd8fa5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/all_path_options.yml @@ -1,8 +1,7 @@ setup: - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: indices.create: include_type_name: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_no_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_no_types.yml deleted file mode 100644 index b2489d2ad012e..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/220_no_types.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -"No type returned": - - - skip: - version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - - - do: - index: - include_type_name: false - index: test_1 - id: 1 - body: {} - - do: - indices.refresh: {} - - - do: - search: - include_type_name: false - index: test_1 - - - length: { hits.hits: 1 } - - match: { hits.hits.0._index: "test_1" } - - is_false: "hits.hits.0._type" - - match: { hits.hits.0._id: "1" } - ---- -"Mixing include_type_name=false with explicit types": - - - skip: - version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - - - do: - catch: /illegal_argument_exception/ - search: - index: index - type: type - include_type_name: false - - - do: - catch: /illegal_argument_exception/ - search: - index: index - type: _doc - include_type_name: false diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml index dd5ada3b1f1e9..3a35ad46f9161 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/10_doc.yml @@ -3,11 +3,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 body: @@ -17,7 +16,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -27,13 +25,12 @@ one: 3 - match: { _index: test_1 } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1" } - match: { _version: 2 } - do: get: - include_type_name: false index: test_1 id: 1 @@ -41,13 +38,3 @@ - match: { _source.count: 1 } - match: { _source.nested.one: 3 } - match: { _source.nested.two: 2 } - - - do: - catch: /illegal_argument_exception/ - update: - index: index - type: type - id: 1 - include_type_name: false - body: - doc: { foo: baz } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml index 69f8f7c6427f3..4afe78ca7d30a 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/11_shard_header.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -20,14 +20,12 @@ - do: index: - include_type_name: false index: foobar id: 1 body: { foo: bar } - do: update: - include_type_name: false index: foobar id: 1 body: @@ -35,7 +33,7 @@ foo: baz - match: { _index: foobar } - - is_false: "_type" + - match: { _type: _doc } - match: { _id: "1"} - match: { _version: 2} - match: { _shards.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml index db4b56eedd391..657c036291bd6 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/12_result.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -19,7 +17,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -31,7 +28,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -44,7 +40,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml index 1595e9d6f8a3f..a849eecc66629 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/20_doc_upsert.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -16,7 +14,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 @@ -26,7 +23,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -35,7 +31,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml index 884fa3e16f6e8..5bdc3ecea75fc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/22_doc_as_upsert.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -16,7 +14,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 @@ -26,7 +23,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -35,7 +31,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yml index 20ff2020932d4..7b474d6bc09dc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/update/30_internal_version.yml @@ -3,12 +3,11 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: catch: missing update: - include_type_name: false index: test_1 id: 1 version: 1 @@ -17,7 +16,6 @@ - do: index: - include_type_name: false index: test_1 id: 1 body: @@ -26,7 +24,6 @@ - do: catch: conflict update: - include_type_name: false index: test_1 id: 1 version: 2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_other_versions.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_other_versions.yml index 904d3ce4b4f7a..9740aa39edeb3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_other_versions.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_other_versions.yml @@ -3,12 +3,11 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: catch: /Validation|Invalid/ update: - include_type_name: false index: test_1 id: 1 version: 2 @@ -20,7 +19,6 @@ - do: catch: /Validation|Invalid/ update: - include_type_name: false index: test_1 id: 1 version: 2 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml index 643d79239d0e4..374390f4b9716 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/40_routing.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -22,7 +22,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -32,7 +31,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 routing: 5 @@ -43,7 +41,6 @@ - do: catch: missing update: - include_type_name: false index: test_1 id: 1 body: @@ -51,7 +48,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 routing: 5 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml index 8039edc2e3a05..b590d01f93a7a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml @@ -3,7 +3,7 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: indices.create: @@ -16,7 +16,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 body: @@ -25,7 +24,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { term: { _id: 1 }} @@ -34,7 +32,6 @@ - do: update: - include_type_name: false index: test_1 id: 2 refresh: true @@ -45,7 +42,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { term: { _id: 2 }} @@ -57,11 +53,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: test_1 id: 1 refresh: true @@ -70,7 +65,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 refresh: "" @@ -80,7 +74,6 @@ - do: search: - include_type_name: false index: test_1 body: query: { term: { cat: dog }} @@ -92,11 
+85,10 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 + reason: types are required in requests before 7.0.0 - do: index: - include_type_name: false index: update_60_refresh_1 id: update_60_refresh_id1 body: { foo: bar } @@ -105,7 +97,6 @@ - do: search: - include_type_name: false index: update_60_refresh_1 body: query: { term: { _id: update_60_refresh_id1 }} @@ -113,7 +104,6 @@ - do: update: - include_type_name: false index: update_60_refresh_1 id: update_60_refresh_id1 refresh: wait_for @@ -123,7 +113,6 @@ - do: search: - include_type_name: false index: update_60_refresh_1 body: query: { match: { test: asdf } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml index c69984e5cde39..9e6d5a4671955 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/80_source_filtering.yml @@ -3,11 +3,9 @@ - skip: version: " - 6.99.99" - reason: include_type_name was introduced in 7.0.0 - + reason: types are required in requests before 7.0.0 - do: update: - include_type_name: false index: test_1 id: 1 _source: [foo, bar] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml index 7838c20085103..14b096211c5c2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/85_fields_meta.yml @@ -12,7 +12,6 @@ - do: update: - include_type_name: false index: test_1 id: 1 parent: 5 @@ -25,7 +24,6 @@ - do: get: - include_type_name: false index: test_1 id: 1 parent: 5 diff --git a/server/build.gradle b/server/build.gradle index 412e067782782..1b507e542c45a 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -103,7 +103,7 @@ dependencies { compile 'com.carrotsearch:hppc:0.7.1' // time handling, remove with java 8 time - compile 'joda-time:joda-time:2.10' + compile "joda-time:joda-time:${versions.joda}" // percentiles aggregation compile 'com.tdunning:t-digest:3.2' diff --git a/server/licenses/joda-time-2.10.1.jar.sha1 b/server/licenses/joda-time-2.10.1.jar.sha1 new file mode 100644 index 0000000000000..75e809754ecee --- /dev/null +++ b/server/licenses/joda-time-2.10.1.jar.sha1 @@ -0,0 +1 @@ +9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/server/licenses/joda-time-2.10.jar.sha1 b/server/licenses/joda-time-2.10.jar.sha1 deleted file mode 100644 index a597eabc654bf..0000000000000 --- a/server/licenses/joda-time-2.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f66c8125d1057ffce6c4e29e624cac863e110e2b \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3e54326a6c787 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +6bb87c96d76cdc70be77261d39376613b0a8860c \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 4d9522f10de5b..0000000000000 --- 
a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dce55e44af096cb9029cb26d22a14d8a9c5223ce \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..187572e525147 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +1b29b3e3b080ec32073c007a1940e5aa7b195316 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index c86294acf5a3e..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1d941758dc91ea7c2d515dd97b5d9b23b0f1874 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..68553b80b1a1b --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 75200bc0c1525..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e884b8ce62a2102b24bfdbe8911674cd5b0d06d9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..75c05f55ed83b --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +c918cc5ac54e5a4dba4740e9e45a93ebd3c95c77 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index b1ae597fadfb7..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3870972c07d7fa41a3bc58eb65952da53a16a406 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..afd8b925614fe --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +6cff1fa9ac25c840589d9a39a42ed4629b594cf4 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 02935671ce899..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8f0b73cfd01fc48735f1e06f16f7ccb47fc183e \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..6b525fa5ea64b --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ 
+2a843337e03493ab5f3498b5dd232fa9abb9e765 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index fdfab321a6791..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d253fae720355e2ff40d529d62c2b3de403d0d0 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..9487a7fa579a0 --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +afda00bbee5fb8b4c36867eabb83267b3b2b8c10 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index d7c9cdf3e41d6..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9ca14bcda331a425d2d7c16022fdfd1c6942924 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..3e6fe1ce378c4 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +a2d8bc6a0486cfa6b4de8c1103017b35c0193544 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 93ec704aeaeb0..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -200454bbfe5ec93d941d9a9d27703883122a4522 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..dbb72428046fd --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +79a3b80245a9cf00f24f5d6e298a8e1a887760f1 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index d57b6be7fbf31..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47915a125e54c845a4b540201cda88dc7612da08 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..db1d47c8307d0 --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +37c9970ec38f64e7ccecbe17efbabdaabe8da2ea \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 0ed04b6f69b41..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5d49e1c6ee7550234539314e600e2893e13cb80 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 
b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..0e7ba7aeb9e94 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +7103c3482c728a9788922aa39e39a5ed2bdd3a11 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 41c6a4a243ed7..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68081b60905f1b53b3705b9cfa4403b8aba44352 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..bba0f7269e45e --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +89d389c1020fac58f462819ad822c9b09e52f563 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 63734717b2fbc..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c99d56a453cecc7258300fd04b438713b944f1b9 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1d8884aa8f23d --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +b62e34e522f3afa9c3f1655b97b995ff6ba2592d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 3fa056da3db0a..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2471966478f829b6455556346014f02ff59f50c0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..1ff50782c1780 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +0c92f6b03eb226586b431a834dca90a1f2cd85b8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index bd3d2e719a0ae..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -46e012be699251306ad13f4582c30d79cea4b307 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 new file mode 100644 index 0000000000000..dd4d9e0665e6c --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 @@ -0,0 +1 @@ +3a659287ba728f7a0d81694ce32e9ef741a13c19 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 deleted file mode 100644 index 8a4fc23cfcdae..0000000000000 
--- a/server/licenses/lucene-suggest-8.0.0-snapshot-7d0a7782fa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dea19dd9e971d2a0171e7d78662f732b45148a27 \ No newline at end of file diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java index 563414171e98f..a4791e85ef3ca 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -53,7 +53,7 @@ public CustomFieldQuery(Query query, IndexReader reader, boolean phraseHighlight } @Override - void flatten(Query sourceQuery, IndexReader reader, Collection flatQueries, float boost) throws IOException { + protected void flatten(Query sourceQuery, IndexReader reader, Collection flatQueries, float boost) throws IOException { if (sourceQuery instanceof BoostQuery) { BoostQuery bq = (BoostQuery) sourceQuery; sourceQuery = bq.getQuery(); diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9021e2d4c1f66..706d5095dee2c 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -114,6 +114,7 @@ public static Type fromDisplayName(final String displayName) { final String shortHash; final String date; final boolean isSnapshot; + final String version; flavor = Flavor.fromDisplayName(System.getProperty("es.distribution.flavor", "unknown")); type = Type.fromDisplayName(System.getProperty("es.distribution.type", "unknown")); @@ -121,12 +122,16 @@ public static Type fromDisplayName(final String displayName) { final String esPrefix = "elasticsearch-" + Version.CURRENT; final URL url = getElasticsearchCodeSourceLocation(); final String urlStr = url == null ? "" : url.toString(); - if (urlStr.startsWith("file:/") && (urlStr.endsWith(esPrefix + ".jar") || urlStr.endsWith(esPrefix + "-SNAPSHOT.jar"))) { + if (urlStr.startsWith("file:/") && ( + urlStr.endsWith(esPrefix + ".jar") || + urlStr.matches("(.*)" + esPrefix + "(-)?((alpha|beta|rc)[0-9]+)?(-SNAPSHOT)?.jar") + )) { try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) { Manifest manifest = jar.getManifest(); shortHash = manifest.getMainAttributes().getValue("Change"); date = manifest.getMainAttributes().getValue("Build-Date"); isSnapshot = "true".equals(manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Snapshot")); + version = manifest.getMainAttributes().getValue("X-Compile-Elasticsearch-Version"); } catch (IOException e) { throw new RuntimeException(e); } @@ -134,6 +139,7 @@ public static Type fromDisplayName(final String displayName) { // not running from the official elasticsearch jar file (unit tests, IDE, uber client jar, shadiness) shortHash = "Unknown"; date = "Unknown"; + version = "Unknown"; final String buildSnapshot = System.getProperty("build.snapshot"); if (buildSnapshot != null) { try { @@ -155,8 +161,12 @@ public static Type fromDisplayName(final String displayName) { throw new IllegalStateException("Error finding the build date. " + "Stopping Elasticsearch now so it doesn't run in subtly broken ways. This is likely a build bug."); } + if (version == null) { + throw new IllegalStateException("Error finding the build version. " + + "Stopping Elasticsearch now so it doesn't run in subtly broken ways. 
This is likely a build bug."); + } - CURRENT = new Build(flavor, type, shortHash, date, isSnapshot); + CURRENT = new Build(flavor, type, shortHash, date, isSnapshot, version); } private final boolean isSnapshot; @@ -175,13 +185,18 @@ static URL getElasticsearchCodeSourceLocation() { private final Type type; private final String shortHash; private final String date; + private final String version; - public Build(final Flavor flavor, final Type type, final String shortHash, final String date, boolean isSnapshot) { + public Build( + final Flavor flavor, final Type type, final String shortHash, final String date, boolean isSnapshot, + String version + ) { this.flavor = flavor; this.type = type; this.shortHash = shortHash; this.date = date; this.isSnapshot = isSnapshot; + this.version = version; } public String shortHash() { @@ -208,7 +223,14 @@ public static Build readBuild(StreamInput in) throws IOException { String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - return new Build(flavor, type, hash, date, snapshot); + + final String version; + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + version = in.readString(); + } else { + version = in.getVersion().toString(); + } + return new Build(flavor, type, hash, date, snapshot, version); } public static void writeBuild(Build build, StreamOutput out) throws IOException { @@ -221,6 +243,22 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.shortHash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeString(build.getQualifiedVersion()); + } + } + + /** + * Get the version as considered at build time + * + * Offers a way to get the fully qualified version as configured by the build. + * This will be the same as {@link Version} for production releases, but may include one of the qualifiers (e.g. alpha1) + * or -SNAPSHOT for others.
+ * + * @return the fully qualified build version + */ + public String getQualifiedVersion() { + return version; } public Flavor flavor() { @@ -235,9 +273,18 @@ public boolean isSnapshot() { return isSnapshot; } + /** + * Provides information about the intent of the build + * + * @return true if the build is intended for production use + */ + public boolean isProductionRelease() { + return version.matches("[0-9]+\\.[0-9]+\\.[0-9]+"); + } + @Override public String toString() { - return "[" + flavor.displayName() + "][" + type.displayName + "][" + shortHash + "][" + date + "]"; + return "[" + flavor.displayName() + "][" + type.displayName + "][" + shortHash + "][" + date + "][" + version + "]"; } @Override @@ -265,13 +312,15 @@ public boolean equals(Object o) { if (!shortHash.equals(build.shortHash)) { return false; } + if (version.equals(build.version) == false) { + return false; + } return date.equals(build.date); - } @Override public int hashCode() { - return Objects.hash(flavor, type, isSnapshot, shortHash, date); + return Objects.hash(flavor, type, isSnapshot, shortHash, date, version); } } diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index c009bb3818cc8..80c0ceedfd87f 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1007,7 +1007,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.common.xcontent.UnknownNamedObjectException::new, 148, UNKNOWN_VERSION_ADDED), TOO_MANY_BUCKETS_EXCEPTION(MultiBucketConsumerService.TooManyBucketsException.class, MultiBucketConsumerService.TooManyBucketsException::new, 149, - Version.V_7_0_0_alpha1); + Version.V_7_0_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 0f636f76d8ae5..854c4aee00ba2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -105,14 +105,16 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_3_ID = 6040399; public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_4_ID = 6040499; + public static final Version V_6_4_4 = new Version(V_6_4_4_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; - public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_7_0_0_alpha1_ID = 7000001; - public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version CURRENT = V_7_0_0_alpha1; + public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final int V_7_0_0_ID = 7000099; + public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version CURRENT = V_7_0_0; + static { assert
CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to [" @@ -125,12 +127,14 @@ public static Version readVersion(StreamInput in) throws IOException { public static Version fromId(int id) { switch (id) { - case V_7_0_0_alpha1_ID: - return V_7_0_0_alpha1; + case V_7_0_0_ID: + return V_7_0_0; case V_6_6_0_ID: return V_6_6_0; case V_6_5_0_ID: return V_6_5_0; + case V_6_4_4_ID: + return V_6_4_4; case V_6_4_3_ID: return V_6_4_3; case V_6_4_2_ID: @@ -391,7 +395,7 @@ public static void main(String[] args) { final String versionOutput = String.format( Locale.ROOT, "Version: %s, Build: %s/%s/%s/%s, JVM: %s", - Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), + Build.CURRENT.getQualifiedVersion(), Build.CURRENT.flavor().displayName(), Build.CURRENT.type().displayName(), Build.CURRENT.shortHash(), @@ -425,10 +429,6 @@ public String toString() { return sb.toString(); } - public static String displayVersion(final Version version, final boolean isSnapshot) { - return version + (isSnapshot ? "-SNAPSHOT" : ""); - } - @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 7273bf29462ac..4abc01ff0134d 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -296,9 +296,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) public XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException { ReplicationResponse.ShardInfo shardInfo = getShardInfo(); builder.field(_INDEX, shardId.getIndexName()); - if (params.paramAsBoolean("include_type_name", true)) { - builder.field(_TYPE, type); - } + builder.field(_TYPE, type); builder.field(_ID, id) .field(_VERSION, version) .field(RESULT, getResult().getLowercase()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java index e465256a0763b..499569a7fb8dc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -226,6 +226,7 @@ public static class OsStats implements ToXContentFragment { final int availableProcessors; final int allocatedProcessors; final ObjectIntHashMap names; + final ObjectIntHashMap prettyNames; final org.elasticsearch.monitor.os.OsStats.Mem mem; /** @@ -233,6 +234,7 @@ public static class OsStats implements ToXContentFragment { */ private OsStats(List nodeInfos, List nodeStatsList) { this.names = new ObjectIntHashMap<>(); + this.prettyNames = new ObjectIntHashMap<>(); int availableProcessors = 0; int allocatedProcessors = 0; for (NodeInfo nodeInfo : nodeInfos) { @@ -242,6 +244,9 @@ private OsStats(List nodeInfos, List nodeStatsList) { if (nodeInfo.getOs().getName() != null) { names.addTo(nodeInfo.getOs().getName(), 1); } + if (nodeInfo.getOs().getPrettyName() != null) { + prettyNames.addTo(nodeInfo.getOs().getPrettyName(), 1); + } } this.availableProcessors = availableProcessors; this.allocatedProcessors = allocatedProcessors; @@ -280,6 +285,8 @@ static final class Fields { static final String ALLOCATED_PROCESSORS = "allocated_processors"; static final String NAME = "name"; static final String 
NAMES = "names"; + static final String PRETTY_NAME = "pretty_name"; + static final String PRETTY_NAMES = "pretty_names"; static final String COUNT = "count"; } @@ -289,11 +296,27 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors); builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors); builder.startArray(Fields.NAMES); - for (ObjectIntCursor name : names) { - builder.startObject(); - builder.field(Fields.NAME, name.key); - builder.field(Fields.COUNT, name.value); - builder.endObject(); + { + for (ObjectIntCursor name : names) { + builder.startObject(); + { + builder.field(Fields.NAME, name.key); + builder.field(Fields.COUNT, name.value); + } + builder.endObject(); + } + } + builder.endArray(); + builder.startArray(Fields.PRETTY_NAMES); + { + for (final ObjectIntCursor prettyName : prettyNames) { + builder.startObject(); + { + builder.field(Fields.PRETTY_NAME, prettyName.key); + builder.field(Fields.COUNT, prettyName.value); + } + builder.endObject(); + } } builder.endArray(); mem.toXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 8d2ca9886d000..8ef16012cf308 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -239,7 +239,7 @@ public AliasActions(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_6_4_0)) { writeIndex = in.readOptionalBoolean(); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { originalAliases = in.readStringArray(); } } @@ -256,7 +256,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_6_4_0)) { out.writeOptionalBoolean(writeIndex); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeStringArray(originalAliases); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index fa2a395f2c9e4..1bbce19ee8dec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -460,7 +460,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(Alias.read(in)); } - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); // updateAllTypes } waitForActiveShards = ActiveShardCount.readFrom(in); @@ -485,7 +485,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(true); // updateAllTypes } waitForActiveShards.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index a827444acb8c5..926ae175d65ad 100644 
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -297,7 +297,7 @@ public void readFrom(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); type = in.readOptionalString(); source = in.readString(); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); // updateAllTypes } concreteIndex = in.readOptionalWriteable(Index::new); @@ -310,7 +310,7 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeOptionalString(type); out.writeString(source); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(true); // updateAllTypes } out.writeOptionalWriteable(concreteIndex); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 165aa7afd9e2e..53ec854a9e21b 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; @@ -83,13 +84,13 @@ public DeleteRequest(String index, String type, String id) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); - if (type == null) { + if (Strings.isEmpty(type)) { validationException = addValidationError("type is missing", validationException); } - if (id == null) { + if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } - if (!versionType.validateVersionForWrites(version)) { + if (versionType.validateVersionForWrites(version) == false) { validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } @@ -188,7 +189,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readOptionalString(); // _parent } version = in.readLong(); @@ -201,7 +202,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeOptionalString(null); // _parent } out.writeLong(version); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index 6fdf355c0670c..ee18dfd8d2140 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -34,6 +34,8 @@ import java.io.IOException; +import static 
org.elasticsearch.action.ValidateActions.addValidationError; + /** * Explain request encapsulating the explain query and document identifier to get an explanation for. */ @@ -152,11 +154,11 @@ public ExplainRequest filteringAlias(AliasFilter filteringAlias) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); + if (Strings.isEmpty(type)) { + validationException = addValidationError("type is missing", validationException); } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); + if (Strings.isEmpty(id)) { + validationException = addValidationError("id is missing", validationException); } if (query == null) { validationException = ValidateActions.addValidationError("query is missing", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 090935107a778..5162fd46ecab0 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; @@ -33,6 +34,8 @@ import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * A request to get a document (its source) from an index based on its type (optional) and id. Best created using * {@link org.elasticsearch.client.Requests#getRequest(String)}. 
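The validate() rewrites in the surrounding delete/explain/get request hunks switch from bare null checks to Strings.isEmpty, so an explicitly empty type or id is now rejected as well, and every problem is accumulated before the request fails. A minimal, self-contained sketch of that accumulate-then-fail pattern; the ValidationException and addValidationError below are simplified stand-ins for the real ActionRequestValidationException and ValidateActions, not the actual classes:

```java
import java.util.ArrayList;
import java.util.List;

public class ValidateSketch {
    // Simplified stand-in for ActionRequestValidationException: collects every error.
    static class ValidationException extends Exception {
        final List<String> errors = new ArrayList<>();
        @Override public String getMessage() { return "Validation Failed: " + errors; }
    }

    // Mirrors the ValidateActions.addValidationError idiom: lazily create, then append.
    static ValidationException addValidationError(String error, ValidationException e) {
        if (e == null) {
            e = new ValidationException();
        }
        e.errors.add(error);
        return e;
    }

    static boolean isEmpty(String s) { return s == null || s.isEmpty(); }

    // A get-style request is now invalid when type or id is null OR empty, not just null.
    static ValidationException validate(String type, String id) {
        ValidationException validationException = null;
        if (isEmpty(type)) {
            validationException = addValidationError("type is missing", validationException);
        }
        if (isEmpty(id)) {
            validationException = addValidationError("id is missing", validationException);
        }
        return validationException;
    }

    public static void main(String[] args) {
        // both errors are reported in one pass instead of failing on the first
        System.out.println(validate("", null).getMessage());
    }
}
```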
@@ -91,13 +94,13 @@ public GetRequest(String index, String type, String id) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); + if (Strings.isEmpty(type)) { + validationException = addValidationError("type is missing", validationException); } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); + if (Strings.isEmpty(id)) { + validationException = addValidationError("id is missing", validationException); } - if (!versionType.validateVersionForReads(version)) { + if (versionType.validateVersionForReads(version) == false) { validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); } @@ -245,7 +248,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readString(); id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readOptionalString(); } preference = in.readOptionalString(); @@ -264,7 +267,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeOptionalString(null); } out.writeOptionalString(preference); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index df23bad10bce7..920049867968c 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -191,7 +191,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalString(); id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readOptionalString(); // _parent } storedFields = in.readOptionalStringArray(); @@ -207,7 +207,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(type); out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeOptionalString(null); // _parent } out.writeOptionalStringArray(storedFields); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 8f5fd156018a2..710ae331b99f4 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -514,7 +514,7 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalString(); id = in.readOptionalString(); routing = in.readOptionalString(); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readOptionalString(); // _parent } if (in.getVersion().before(Version.V_6_0_0_alpha1)) { @@ -541,7 +541,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(type); 
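The readFrom/writeTo hunks in these request classes all apply the same backwards-compatibility gate: peers older than 7.0.0 still expect the removed _parent slot on the wire, so a null placeholder is written for them (and skipped on read). A rough standalone sketch of the idea, with DataOutput and plain int version ids standing in for the real StreamOutput and Version (both assumptions for illustration):

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class WireGateSketch {
    // Illustrative version ids in the style of Version.java
    static final int V_6_5_0 = 6_05_00_99;
    static final int V_7_0_0 = 7_00_00_99;

    // Writes an "optional string" the way this sketch's wire format does:
    // a presence byte, then the value if present.
    static void writeOptionalString(DataOutput out, String s) throws IOException {
        out.writeBoolean(s != null);
        if (s != null) {
            out.writeUTF(s);
        }
    }

    // Serialize a delete-style request for a peer speaking wireVersion.
    static void writeTo(DataOutput out, int wireVersion, String type, String id, String routing) throws IOException {
        out.writeUTF(type);
        out.writeUTF(id);
        writeOptionalString(out, routing);
        if (wireVersion < V_7_0_0) {
            // pre-7.0.0 peers still expect the removed _parent slot; keep the bytes on the wire
            writeOptionalString(out, null);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream oldWire = new ByteArrayOutputStream();
        ByteArrayOutputStream newWire = new ByteArrayOutputStream();
        writeTo(new DataOutputStream(oldWire), V_6_5_0, "_doc", "1", null);
        writeTo(new DataOutputStream(newWire), V_7_0_0, "_doc", "1", null);
        // the 6.x stream carries one extra placeholder byte
        System.out.println(oldWire.size() + " bytes vs " + newWire.size() + " bytes");
    }
}
```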
out.writeOptionalString(id); out.writeOptionalString(routing); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeOptionalString(null); // _parent } if (out.getVersion().before(Version.V_6_0_0_alpha1)) { diff --git a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java index 6cae1056a4b7b..4b06a2d0613df 100644 --- a/server/src/main/java/org/elasticsearch/action/main/MainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/main/MainResponse.java @@ -61,6 +61,7 @@ public Version getVersion() { return version; } + public ClusterName getClusterName() { return clusterName; } @@ -81,7 +82,7 @@ public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeBoolean(true); } } @@ -94,7 +95,7 @@ public void readFrom(StreamInput in) throws IOException { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { in.readBoolean(); } } @@ -112,6 +113,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws .field("build_hash", build.shortHash()) .field("build_date", build.date()) .field("build_snapshot", build.isSnapshot()) + .field("qualified", build.getQualifiedVersion()) .field("lucene_version", version.luceneVersion.toString()) .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString()) .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString()) @@ -138,7 +140,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws buildType == null ? 
Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), (String) value.get("build_hash"), (String) value.get("build_date"), - (boolean) value.get("build_snapshot")); + (boolean) value.get("build_snapshot"), + (String) value.get("qualified") + ); response.version = Version.fromString((String) value.get("number")); }, (parser, context) -> parser.map(), new ParseField("version")); } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index f2b1b0d5c6265..ac17ec1e579bc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -170,7 +170,7 @@ public void readFrom(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = Item.readItem(in); } - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { tookInMillis = in.readVLong(); } } @@ -182,7 +182,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 445ed643ca527..c38c2a1c69a7f 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -248,7 +248,7 @@ public boolean ignoreAliases() { /** * - * @return whether indices that are marked as throttled should be ignored when resolving a wildcard or alias + * @return whether indices that are marked as throttled should be ignored */ public boolean ignoreThrottled() { return options.contains(Option.IGNORE_THROTTLED); @@ -256,8 +256,8 @@ public boolean ignoreThrottled() { public void writeIndicesOptions(StreamOutput out) throws IOException { EnumSet

diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java - * <ul> - * <li>null if the uid wasn't found,</li> - * <li>a doc ID and the associated seqNo otherwise</li> - * </ul>
+ * Loads the internal docId and sequence number of the latest copy for a given uid from the provided reader. + * The flag {@link DocIdAndSeqNo#isLive} indicates whether the returned document is live or (soft)deleted. + * This returns {@code null} if no document matching the given uid term was found. */ public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException { - PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); - List leaves = reader.leaves(); + final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field()); + final List leaves = reader.leaves(); + DocIdAndSeqNo latest = null; // iterate backwards to optimize for the frequently updated documents // which are likely to be in the last segments for (int i = leaves.size() - 1; i >= 0; i--) { final LeafReaderContext leaf = leaves.get(i); - PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; - DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf); - if (result != null) { + final PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; + final DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf); + if (result == null) { + continue; + } + if (result.isLive) { + // The live document must always be the latest copy, thus we can terminate early here. + assert latest == null || latest.seqNo <= result.seqNo : + "the live doc does not have the highest seq_no; live_seq_no=" + result.seqNo + " < deleted_seq_no=" + latest.seqNo; return result; } + if (latest == null || latest.seqNo < result.seqNo) { + latest = result; + } } - return null; - } - - /** - * Load the primaryTerm associated with the given {@link DocIdAndSeqNo} - */ - public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo, String uidField) throws IOException { - NumericDocValues primaryTerms = docIdAndSeqNo.context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); - long result; - if (primaryTerms != null && primaryTerms.advanceExact(docIdAndSeqNo.docId)) { - result = primaryTerms.longValue(); - } else { - result = 0; - } - assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]" - + " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]"; - return result; + return latest; } /** diff --git a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java index 6b89a90aa2c77..4fc3a0f6bb6bd 100644 --- a/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java +++ b/server/src/main/java/org/elasticsearch/common/network/CloseableChannel.java @@ -26,7 +26,6 @@ import java.io.Closeable; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -90,7 +89,7 @@ static void closeChannels(List channels, boolean IOUtils.close(channels); } catch (IOException e) { // The CloseableChannel#close method does not throw IOException, so this should not occur.
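The rewritten loadDocIdAndSeqNo above walks the segments newest-first, returns immediately on a live copy (which, as the assertion documents, must carry the highest seqNo), and otherwise keeps the (soft)deleted copy with the highest seqNo. A self-contained sketch of just that selection logic, with a simplified DocIdAndSeqNo standing in for the Lucene-backed original:

```java
import java.util.Arrays;
import java.util.List;

public class LatestCopySketch {
    // Simplified stand-in for VersionsAndSeqNoResolver.DocIdAndSeqNo.
    static final class DocIdAndSeqNo {
        final int docId; final long seqNo; final boolean isLive;
        DocIdAndSeqNo(int docId, long seqNo, boolean isLive) {
            this.docId = docId; this.seqNo = seqNo; this.isLive = isLive;
        }
    }

    // segments are ordered oldest-first, as reader.leaves() is; each entry is the
    // per-segment lookup result (null when the uid is absent from that segment).
    static DocIdAndSeqNo latestCopy(List<DocIdAndSeqNo> segments) {
        DocIdAndSeqNo latest = null;
        // iterate backwards: frequently updated docs tend to live in the newest segments
        for (int i = segments.size() - 1; i >= 0; i--) {
            DocIdAndSeqNo result = segments.get(i);
            if (result == null) {
                continue;
            }
            if (result.isLive) {
                // a live copy is always the latest one, so terminate early
                return result;
            }
            if (latest == null || latest.seqNo < result.seqNo) {
                latest = result; // best (soft)deleted candidate so far
            }
        }
        return latest; // null when the uid was never found
    }

    public static void main(String[] args) {
        DocIdAndSeqNo r = latestCopy(Arrays.asList(
            new DocIdAndSeqNo(0, 3, false), null, new DocIdAndSeqNo(7, 9, false)));
        System.out.println("docId=" + r.docId + " seqNo=" + r.seqNo + " live=" + r.isLive);
    }
}
```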
- throw new UncheckedIOException(e); + throw new AssertionError(e); } if (blocking) { ArrayList> futures = new ArrayList<>(channels.size()); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java index 422941acc73e5..50e1844609df5 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/ShingleTokenFilterFactory.java @@ -40,7 +40,7 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro int shingleDiff = maxShingleSize - minShingleSize + (outputUnigrams ? 1 : 0); if (shingleDiff > maxAllowedShingleDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException( "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" + " must be less than or equal to: [" + maxAllowedShingleDiff + "] but was [" + shingleDiff + "]. This limit" diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 26808330986da..89545af641c28 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -793,7 +793,7 @@ public final CommitStats commitStats() { /** * Global stats on segments. */ - public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) { + public SegmentsStats segmentsStats(boolean includeSegmentFileSizes) { ensureOpen(); Set segmentName = new HashSet<>(); SegmentsStats stats = new SegmentsStats(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index c58e13d65deb6..31d1cfb660f0e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -666,31 +666,32 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) VersionValue versionValue = getVersionFromMap(op.uid().bytes()); assert incrementVersionLookup(); if (versionValue != null) { - if (op.seqNo() > versionValue.seqNo || - (op.seqNo() == versionValue.seqNo && op.primaryTerm() > versionValue.term)) + if (op.seqNo() > versionValue.seqNo) { status = OpVsLuceneDocStatus.OP_NEWER; - else { + } else if (op.seqNo() == versionValue.seqNo) { + assert versionValue.term == op.primaryTerm() : "primary term not matched; id=" + op.id() + " seq_no=" + op.seqNo() + + " op_term=" + op.primaryTerm() + " existing_term=" + versionValue.term; + status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; + } else { status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } } else { // load from index assert incrementIndexVersionLookup(); try (Searcher searcher = acquireSearcher("load_seq_no", SearcherScope.INTERNAL)) { - DocIdAndSeqNo docAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), op.uid()); + final DocIdAndSeqNo docAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), op.uid()); if (docAndSeqNo == null) { status = OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND; } else if (op.seqNo() > docAndSeqNo.seqNo) { - status = OpVsLuceneDocStatus.OP_NEWER; - } else if (op.seqNo() == 
docAndSeqNo.seqNo) { - assert localCheckpointTracker.contains(op.seqNo()) || softDeleteEnabled == false : - "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); - // load term to tie break - final long existingTerm = VersionsAndSeqNoResolver.loadPrimaryTerm(docAndSeqNo, op.uid().field()); - if (op.primaryTerm() > existingTerm) { + if (docAndSeqNo.isLive) { status = OpVsLuceneDocStatus.OP_NEWER; } else { - status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; + status = OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND; } + } else if (op.seqNo() == docAndSeqNo.seqNo) { + assert localCheckpointTracker.contains(op.seqNo()) || softDeleteEnabled == false : + "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); + status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } else { status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/RamAccountingSearcherFactory.java b/server/src/main/java/org/elasticsearch/index/engine/RamAccountingSearcherFactory.java index 7972d426fba02..9630485cbc105 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/RamAccountingSearcherFactory.java +++ b/server/src/main/java/org/elasticsearch/index/engine/RamAccountingSearcherFactory.java @@ -48,6 +48,11 @@ final class RamAccountingSearcherFactory extends SearcherFactory { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { + processReaders(reader, previousReader); + return super.newSearcher(reader, previousReader); + } + + public void processReaders(IndexReader reader, IndexReader previousReader) { final CircuitBreaker breaker = breakerService.getBreaker(CircuitBreaker.ACCOUNTING); // Construct a list of the previous segment readers, we only want to track memory used @@ -79,6 +84,5 @@ public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) segmentReader.getCoreCacheHelper().addClosedListener(k -> breaker.addWithoutBreaking(-ramBytesUsed)); } } - return super.newSearcher(reader, previousReader); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index 26ef259a1e1c6..fc4b0632c8076 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -57,7 +57,7 @@ * * @see #ReadOnlyEngine(EngineConfig, SeqNoStats, TranslogStats, boolean, Function) */ -public final class ReadOnlyEngine extends Engine { +public class ReadOnlyEngine extends Engine { private final SegmentInfos lastCommittedSegmentInfos; private final SeqNoStats seqNoStats; @@ -66,6 +66,7 @@ public final class ReadOnlyEngine extends Engine { private final IndexCommit indexCommit; private final Lock indexWriterLock; private final DocsStats docsStats; + protected final RamAccountingSearcherFactory searcherFactory; /** * Creates a new ReadOnlyEngine. 
This ctor can also be used to open a read-only engine on top of an already opened @@ -82,6 +83,7 @@ public final class ReadOnlyEngine extends Engine { public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats translogStats, boolean obtainLock, Function readerWrapperFunction) { super(config); + this.searcherFactory = new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService()); try { Store store = config.getStore(); store.incRef(); @@ -96,14 +98,10 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory); this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats; this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats; - reader = ElasticsearchDirectoryReader.wrap(open(directory), config.getShardId()); - if (config.getIndexSettings().isSoftDeleteEnabled()) { - reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); - } - reader = readerWrapperFunction.apply(reader); - this.indexCommit = reader.getIndexCommit(); - this.searcherManager = new SearcherManager(reader, - new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService())); + this.indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, directory); + reader = open(indexCommit); + reader = wrapReader(reader, readerWrapperFunction); + searcherManager = new SearcherManager(reader, searcherFactory); this.docsStats = docsStats(lastCommittedSegmentInfos); this.indexWriterLock = indexWriterLock; success = true; @@ -117,8 +115,17 @@ public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats } } - protected DirectoryReader open(final Directory directory) throws IOException { - return DirectoryReader.open(directory); + protected final DirectoryReader wrapReader(DirectoryReader reader, + Function readerWrapperFunction) throws IOException { + reader = ElasticsearchDirectoryReader.wrap(reader, engineConfig.getShardId()); + if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { + reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD); + } + return readerWrapperFunction.apply(reader); + } + + protected DirectoryReader open(IndexCommit commit) throws IOException { + return DirectoryReader.open(commit); } private DocsStats docsStats(final SegmentInfos lastCommittedSegmentInfos) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java index 167ec9ce26b33..c7e11e85f7da5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -54,13 +54,13 @@ final class TranslogLeafReader extends LeafReader { private final Translog.Index operation; private static final FieldInfo FAKE_SOURCE_FIELD = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, false); + 0, 0, 0, false); private static final FieldInfo FAKE_ROUTING_FIELD = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, false); + 0, 0, 0, false); private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), - 0, 0, 
false); + 0, 0, 0, false); private final Version indexVersionCreated; TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java b/server/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java index 57b388b89c02d..40b07d0a15c90 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/RamAccountingTermsEnum.java @@ -27,7 +27,7 @@ import java.io.IOException; /** - * {@link TermsEnum} that takes a MemoryCircuitBreaker, increasing the breaker + * {@link TermsEnum} that takes a CircuitBreaker, increasing the breaker * every time {@code .next(...)} is called. Proxies all methods to the original * TermsEnum otherwise. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java index c519c6634a20f..5eaaf14f8bcb5 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractIndexFieldData.java @@ -86,7 +86,7 @@ public FD load(LeafReaderContext context) { * A {@code PerValueEstimator} is a sub-class that can be used to estimate * the memory overhead for loading the data. Each field data * implementation should implement its own {@code PerValueEstimator} if it - * intends to take advantage of the MemoryCircuitBreaker. + * intends to take advantage of the CircuitBreaker. *

* Note that the .beforeLoad(...) and .afterLoad(...) methods must be * manually called. diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index ba5c4cd929fd2..ba70c7035506b 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -256,9 +256,7 @@ public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(_INDEX, index); - if (params.paramAsBoolean("include_type_name", true)) { - builder.field(_TYPE, type); - } + builder.field(_TYPE, type); builder.field(_ID, id); if (isExists()) { if (version != -1) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 04480de70a8e4..eaafeefa7e0dd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -409,7 +409,7 @@ protected final void failIfNoDocValues() { } protected final void failIfNotIndexed() { - if (indexOptions() == IndexOptions.NONE && pointDimensionCount() == 0) { + if (indexOptions() == IndexOptions.NONE && pointDataDimensionCount() == 0) { // we throw an IAE rather than an ISE so that it translates to a 4xx code rather than 5xx code on the http layer throw new IllegalArgumentException("Cannot search on field [" + name() + "] since it is not indexed."); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 828b5b956f5de..076f6e7ebe030 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -154,7 +154,7 @@ public MapperService(IndexSettings indexSettings, IndexAnalyzers indexAnalyzers, this.mapperRegistry = mapperRegistry; if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings()) && - indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); } @@ -411,7 +411,7 @@ private synchronized Map internalMerge(@Nullable Documen Map results = new LinkedHashMap<>(2); if (defaultMapper != null) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("The [default] mapping cannot be updated on index [" + index().getName() + "]: defaults mappings are not useful anymore now that indices can have at most one type."); } else if (reason == MergeReason.MAPPING_UPDATE) { // only log in case of explicit mapping updates diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 7851bb1655ad0..d0419a0e44b24 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -268,7 +268,7 @@ protected Analyzer 
getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { - return new TokenStreamComponents(components.getTokenizer(), new FixedShingleFilter(components.getTokenStream(), 2)); + return new TokenStreamComponents(components.getSource(), new FixedShingleFilter(components.getTokenStream(), 2)); } } @@ -293,7 +293,7 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); - return new TokenStreamComponents(components.getTokenizer(), filter); + return new TokenStreamComponents(components.getSource(), filter); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index baf60bbbc0912..b275088d89441 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -124,4 +124,15 @@ public void onFailure(Exception e) { } } + /** + * In pre-processing contexts that happen at index time, 'now' date ranges should be replaced by a {@link MatchAllQueryBuilder}. + * Otherwise documents that should match at query time would never match, and documents that have fallen outside the + * date range would continue to match. + * + * @return indicates whether range queries with date ranges using 'now' are rewritten to a {@link MatchAllQueryBuilder}. + */ + public boolean convertNowRangeToMatchAll() { + return false; + } + } diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 756c6456a9f13..6b8a47e8ce23e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -459,6 +459,16 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC @Override protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + // Percolator queries get rewritten and pre-processed at index time. + // If a range query has a date range using 'now' and 'now' gets resolved at index time then + // the pre-processing uses that resolved value. This can then lead to mismatches at query time.
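The rewrite that follows only swaps in a MatchAllQueryBuilder when the context opts in via convertNowRangeToMatchAll() and either bound mentions 'now'. A standalone sketch of that guard, with plain strings as bounds and hypothetical minimal Query types in place of the real QueryBuilder hierarchy:

```java
public class NowRangeRewriteSketch {
    // Hypothetical minimal stand-ins for QueryBuilder / MatchAllQueryBuilder.
    interface Query {}
    static final class MatchAll implements Query {
        @Override public String toString() { return "match_all"; }
    }
    static final class Range implements Query {
        final String field, from, to;
        Range(String field, String from, String to) { this.field = field; this.from = from; this.to = to; }
        @Override public String toString() { return "range[" + field + " " + from + ".." + to + "]"; }
    }

    // At index time (e.g. while pre-processing percolator queries) 'now' must not be
    // resolved to a fixed instant, so the range is widened to match-all instead.
    static Query rewrite(Range range, boolean convertNowRangeToMatchAll) {
        if (convertNowRangeToMatchAll
                && ((range.from != null && range.from.contains("now"))
                    || (range.to != null && range.to.contains("now")))) {
            return new MatchAll();
        }
        return range;
    }

    public static void main(String[] args) {
        Range r = new Range("timestamp", "now-1h", null);
        System.out.println(rewrite(r, true));   // match_all
        System.out.println(rewrite(r, false));  // range[timestamp now-1h..null]
    }
}
```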
+ if (queryRewriteContext.convertNowRangeToMatchAll()) { + if ((from() != null && from().toString().contains("now")) || + (to() != null && to().toString().contains("now"))) { + return new MatchAllQueryBuilder(); + } + } + final MappedFieldType.Relation relation = getRelation(queryRewriteContext); switch (relation) { case DISJOINT: diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java index d3bac583eac68..399e610a54edf 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryParserHelper.java @@ -24,6 +24,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.SearchModule; import java.util.Collection; import java.util.HashMap; @@ -85,7 +86,7 @@ public static Map resolveMappingFields(QueryShardContext context, !multiField, !allField, fieldSuffix); resolvedFields.putAll(fieldMap); } - checkForTooManyFields(resolvedFields); + checkForTooManyFields(resolvedFields, context); return resolvedFields; } @@ -141,7 +142,7 @@ public static Map resolveMappingField(QueryShardContext context, if (acceptAllTypes == false) { try { fieldType.termQuery("", context); - } catch (QueryShardException |UnsupportedOperationException e) { + } catch (QueryShardException | UnsupportedOperationException e) { // field type is never searchable with term queries (eg. geo point): ignore continue; } catch (IllegalArgumentException |ElasticsearchParseException e) { @@ -150,13 +151,14 @@ public static Map resolveMappingField(QueryShardContext context, } fields.put(fieldName, weight); } - checkForTooManyFields(fields); + checkForTooManyFields(fields, context); return fields; } - private static void checkForTooManyFields(Map fields) { - if (fields.size() > 1024) { - throw new IllegalArgumentException("field expansion matches too many fields, limit: 1024, got: " + fields.size()); + private static void checkForTooManyFields(Map fields, QueryShardContext context) { + Integer limit = SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING.get(context.getIndexSettings().getSettings()); + if (fields.size() > limit) { + throw new IllegalArgumentException("field expansion matches too many fields, limit: " + limit + ", got: " + fields.size()); } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 17756630517d2..921eeb319f1a0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2286,23 +2286,32 @@ private void bumpPrimaryTerm(long newPrimaryTerm, final Ch assert newPrimaryTerm > pendingPrimaryTerm; assert operationPrimaryTerm <= pendingPrimaryTerm; final CountDownLatch termUpdated = new CountDownLatch(1); - indexShardOperationPermits.asyncBlockOperations(30, TimeUnit.MINUTES, () -> { - assert operationPrimaryTerm <= pendingPrimaryTerm; - termUpdated.await(); - // indexShardOperationPermits doesn't guarantee that async submissions are executed - // in the order submitted. 
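Separately, the QueryParserHelper hunk above replaces the hard-coded 1024 field-expansion cap with a limit read from SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING. A small sketch of the check, assuming the setting key indices.query.bool.max_clause_count with a default of 1024, and using a plain map in place of the real Settings:

```java
import java.util.Map;

public class FieldExpansionLimitSketch {
    // Stand-in for reading SearchModule.INDICES_MAX_CLAUSE_COUNT_SETTING (assumed default 1024).
    static int maxClauseCount(Map<String, String> indexSettings) {
        return Integer.parseInt(indexSettings.getOrDefault("indices.query.bool.max_clause_count", "1024"));
    }

    // The limit is now configurable per cluster instead of baked into the code.
    static void checkForTooManyFields(Map<String, Float> fields, Map<String, String> indexSettings) {
        int limit = maxClauseCount(indexSettings);
        if (fields.size() > limit) {
            throw new IllegalArgumentException(
                "field expansion matches too many fields, limit: " + limit + ", got: " + fields.size());
        }
    }

    public static void main(String[] args) {
        try {
            checkForTooManyFields(Map.of("title", 1.0f, "body", 1.0f),
                                  Map.of("indices.query.bool.max_clause_count", "1"));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```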
We need to guard against another term bump - if (operationPrimaryTerm < newPrimaryTerm) { - operationPrimaryTerm = newPrimaryTerm; - onBlocked.run(); - } - }, - e -> { + indexShardOperationPermits.asyncBlockOperations(new ActionListener() { + @Override + public void onFailure(final Exception e) { try { failShard("exception during primary term transition", e); } catch (AlreadyClosedException ace) { // ignore, shard is already closed } - }); + } + + @Override + public void onResponse(final Releasable releasable) { + try (Releasable ignored = releasable) { + assert operationPrimaryTerm <= pendingPrimaryTerm; + termUpdated.await(); + // indexShardOperationPermits doesn't guarantee that async submissions are executed + // in the order submitted. We need to guard against another term bump + if (operationPrimaryTerm < newPrimaryTerm) { + operationPrimaryTerm = newPrimaryTerm; + onBlocked.run(); + } + } catch (final Exception e) { + onFailure(e); + } + } + }, 30, TimeUnit.MINUTES); pendingPrimaryTerm = newPrimaryTerm; termUpdated.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java index d4c3833b13a58..67c48c38791f0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java @@ -41,7 +41,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -104,42 +103,54 @@ void blockOperations( final TimeUnit timeUnit, final CheckedRunnable onBlocked) throws InterruptedException, TimeoutException, E { delayOperations(); - try { - doBlockOperations(timeout, timeUnit, onBlocked); + try (Releasable ignored = acquireAll(timeout, timeUnit)) { + onBlocked.run(); } finally { releaseDelayedOperations(); } } /** - * Immediately delays operations and on another thread waits for in-flight operations to finish and then executes {@code onBlocked} - * under the guarantee that no new operations are started. Delayed operations are run after {@code onBlocked} has executed. After - * operations are delayed and the blocking is forked to another thread, returns to the caller. If a failure occurs while blocking - * operations or executing {@code onBlocked} then the {@code onFailure} handler will be invoked. + * Immediately delays operations and on another thread waits for in-flight operations to finish and then acquires all permits. When all + * permits are acquired, the provided {@link ActionListener} is called under the guarantee that no new operations are started. Delayed + * operations are run once the {@link Releasable} is released or if a failure occurs while acquiring all permits; in this case the + * {@code onFailure} handler will be invoked after delayed operations are released. 
* - * @param timeout the maximum time to wait for the in-flight operations block - * @param timeUnit the time unit of the {@code timeout} argument - * @param onBlocked the action to run once the block has been acquired - * @param onFailure the action to run if a failure occurs while blocking operations - * @param the type of checked exception thrown by {@code onBlocked} (not thrown on the calling thread) + * @param onAcquired {@link ActionListener} that is invoked once acquisition is successful or failed + * @param timeout the maximum time to wait for the in-flight operations block + * @param timeUnit the time unit of the {@code timeout} argument */ - void asyncBlockOperations( - final long timeout, final TimeUnit timeUnit, final CheckedRunnable onBlocked, final Consumer onFailure) { + public void asyncBlockOperations(final ActionListener onAcquired, final long timeout, final TimeUnit timeUnit) { delayOperations(); threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() { + + final AtomicBoolean released = new AtomicBoolean(false); + @Override public void onFailure(final Exception e) { - onFailure.accept(e); + try { + releaseDelayedOperationsIfNeeded(); // resume delayed operations as soon as possible + } finally { + onAcquired.onFailure(e); + } } @Override protected void doRun() throws Exception { - doBlockOperations(timeout, timeUnit, onBlocked); + final Releasable releasable = acquireAll(timeout, timeUnit); + onAcquired.onResponse(() -> { + try { + releasable.close(); + } finally { + releaseDelayedOperationsIfNeeded(); + } + }); } - @Override - public void onAfter() { - releaseDelayedOperations(); + private void releaseDelayedOperationsIfNeeded() { + if (released.compareAndSet(false, true)) { + releaseDelayedOperations(); + } } }); } @@ -154,10 +165,7 @@ private void delayOperations() { } } - private void doBlockOperations( - final long timeout, - final TimeUnit timeUnit, - final CheckedRunnable onBlocked) throws InterruptedException, TimeoutException, E { + private Releasable acquireAll(final long timeout, final TimeUnit timeUnit) throws InterruptedException, TimeoutException { if (Assertions.ENABLED) { // since delayed is not volatile, we have to synchronize even here for visibility synchronized (this) { @@ -165,12 +173,13 @@ private void doBlockOperations( } } if (semaphore.tryAcquire(TOTAL_PERMITS, timeout, timeUnit)) { - assert semaphore.availablePermits() == 0; - try { - onBlocked.run(); - } finally { - semaphore.release(TOTAL_PERMITS); - } + final AtomicBoolean closed = new AtomicBoolean(); + return () -> { + if (closed.compareAndSet(false, true)) { + assert semaphore.availablePermits() == 0; + semaphore.release(TOTAL_PERMITS); + } + }; } else { throw new TimeoutException("timeout while blocking operations"); } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java index aba860337f3df..0473c6aeaf551 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -139,7 +139,7 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings if (model == null) { String replacement = LEGACY_BASIC_MODELS.get(basicModel); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("Basic 
model [" + basicModel + "] isn't supported anymore, " + "please use another model."); } else { @@ -170,7 +170,7 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting if (effect == null) { String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("After effect [" + afterEffect + "] isn't supported anymore, please use another effect."); } else { @@ -261,7 +261,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett unknownSettings.removeAll(Arrays.asList(supportedSettings)); unknownSettings.remove("type"); // used to figure out which sim this is if (unknownSettings.isEmpty() == false) { - if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + if (version.onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } else { deprecationLogger.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index d7308c424be8e..44956eec35d9b 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -59,7 +59,7 @@ public final class SimilarityService extends AbstractIndexComponent { static { Map>> defaults = new HashMap<>(); defaults.put(CLASSIC_SIMILARITY, version -> { - if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + if (version.onOrAfter(Version.V_7_0_0)) { return () -> { throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + "similarity or build a custom [scripted] similarity instead."); @@ -86,7 +86,7 @@ public final class SimilarityService extends AbstractIndexComponent { Map> builtIn = new HashMap<>(); builtIn.put(CLASSIC_SIMILARITY, (settings, version, script) -> { - if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + if (version.onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("The [classic] similarity may not be used anymore. 
Please use the [BM25] " + "similarity or build a custom [scripted] similarity instead."); } else { @@ -267,7 +267,7 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers } private static void fail(Version indexCreatedVersion, String message) { - if (indexCreatedVersion.onOrAfter(Version.V_7_0_0_alpha1)) { + if (indexCreatedVersion.onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException(message); } else if (indexCreatedVersion.onOrAfter(Version.V_6_5_0)) { deprecationLogger.deprecated(message); diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index bc77626b94277..447ec9003a4ac 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -22,8 +22,9 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.memory.MemoryIndex; @@ -98,7 +99,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Engine.Searcher searcher = indexShard.acquireSearcher("term_vector")) { - Fields topLevelFields = MultiFields.getFields(get.searcher() != null ? get.searcher().reader() : searcher.reader()); + Fields topLevelFields = fields(get.searcher() != null ? 
get.searcher().reader() : searcher.reader()); DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); /* from an artificial document */ if (request.doc() != null) { @@ -152,6 +153,25 @@ else if (docIdAndVersion != null) { return termVectorsResponse; } + public static Fields fields(IndexReader reader) { + return new Fields() { + @Override + public Iterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) throws IOException { + return MultiTerms.getTerms(reader, field); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + }; + } + private static void handleFieldWildcards(IndexShard indexShard, TermVectorsRequest request) { Set fieldNames = new HashSet<>(); for (String pattern : request.selectedFields()) { @@ -270,7 +290,7 @@ private static Fields generateTermVectors(IndexShard indexShard, Map> setupTokenFilters(Li tokenFilters.register("standard", new AnalysisProvider() { @Override public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - if (indexSettings.getIndexVersionCreated().before(Version.V_7_0_0_alpha1)) { + if (indexSettings.getIndexVersionCreated().before(Version.V_7_0_0)) { deprecationLogger.deprecatedAndMaybeLog("standard_deprecation", "The [standard] token filter name is deprecated and will be removed in a future version."); } else { @@ -182,7 +182,7 @@ static Map setupPreConfiguredTokenFilters(List // Add "standard" for old indices (bwc) preConfiguredTokenFilters.register( "standard", PreConfiguredTokenFilter.singletonWithVersion("standard", true, (reader, version) -> { - if (version.before(Version.V_7_0_0_alpha1)) { + if (version.before(Version.V_7_0_0)) { deprecationLogger.deprecatedAndMaybeLog("standard_deprecation", "The [standard] token filter is deprecated and will be removed in a future version."); } else { diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index 0c81356167440..e7d02cb1a0902 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -19,11 +19,11 @@ package org.elasticsearch.monitor.os; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -35,14 +35,23 @@ public class OsInfo implements Writeable, ToXContentFragment { private final int availableProcessors; private final int allocatedProcessors; private final String name; + private final String prettyName; private final String arch; private final String version; - public OsInfo(long refreshInterval, int availableProcessors, int allocatedProcessors, String name, String arch, String version) { + public OsInfo( + final long refreshInterval, + final int availableProcessors, + final int allocatedProcessors, + final String name, + final String prettyName, + final String arch, + final String version) { this.refreshInterval = refreshInterval; this.availableProcessors = availableProcessors; this.allocatedProcessors = allocatedProcessors; this.name = name; + this.prettyName = prettyName; this.arch = arch; 
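The `OsInfo` serialization hunks below gate the new `prettyName` field on the wire version, so nodes older than 6.6.0 neither send nor expect it. As a rough, self-contained illustration of that backwards-compatibility pattern, here is a sketch using plain `java.io` streams and a made-up version constant in place of `StreamInput`/`StreamOutput` and `Version.V_6_6_0`:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class VersionGatedWireSketch {

    // Illustrative stand-in for Version.V_6_6_0, not the real constant.
    static final int V_6_6_0 = 660;

    // Only write the new field when the remote side is new enough to read it.
    static void write(DataOutputStream out, int remoteVersion, String name, String prettyName) throws IOException {
        out.writeUTF(name);
        if (remoteVersion >= V_6_6_0) {
            out.writeUTF(prettyName);
        }
    }

    // Mirror the gate on the read side so payloads from old senders still parse cleanly.
    static String[] read(DataInputStream in, int remoteVersion) throws IOException {
        String name = in.readUTF();
        String prettyName = remoteVersion >= V_6_6_0 ? in.readUTF() : null;
        return new String[] { name, prettyName };
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        write(new DataOutputStream(bytes), V_6_6_0, "Linux", "Ubuntu 18.04.1 LTS");
        String[] fields = read(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), V_6_6_0);
        System.out.println(fields[0] + " / " + fields[1]);
    }
}
```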
this.version = version; } @@ -52,6 +61,11 @@ public OsInfo(StreamInput in) throws IOException { this.availableProcessors = in.readInt(); this.allocatedProcessors = in.readInt(); this.name = in.readOptionalString(); + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + this.prettyName = in.readOptionalString(); + } else { + this.prettyName = null; + } this.arch = in.readOptionalString(); this.version = in.readOptionalString(); } @@ -62,6 +76,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(availableProcessors); out.writeInt(allocatedProcessors); out.writeOptionalString(name); + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalString(prettyName); + } out.writeOptionalString(arch); out.writeOptionalString(version); } @@ -82,6 +99,10 @@ public String getName() { return name; } + public String getPrettyName() { + return prettyName; + } + public String getArch() { return arch; } @@ -93,6 +114,7 @@ public String getVersion() { static final class Fields { static final String OS = "os"; static final String NAME = "name"; + static final String PRETTY_NAME = "pretty_name"; static final String ARCH = "arch"; static final String VERSION = "version"; static final String REFRESH_INTERVAL = "refresh_interval"; @@ -108,6 +130,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (name != null) { builder.field(Fields.NAME, name); } + if (prettyName != null) { + builder.field(Fields.PRETTY_NAME, prettyName); + } if (arch != null) { builder.field(Fields.ARCH, arch); } diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 9e0aa24a10526..06a5aadd22945 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -19,8 +19,8 @@ package org.elasticsearch.monitor.os; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; @@ -36,6 +36,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; public class OsProbe { @@ -519,9 +521,68 @@ public static OsProbe getInstance() { private final Logger logger = LogManager.getLogger(getClass()); - public OsInfo osInfo(long refreshInterval, int allocatedProcessors) { - return new OsInfo(refreshInterval, Runtime.getRuntime().availableProcessors(), - allocatedProcessors, Constants.OS_NAME, Constants.OS_ARCH, Constants.OS_VERSION); + OsInfo osInfo(long refreshInterval, int allocatedProcessors) throws IOException { + return new OsInfo( + refreshInterval, + Runtime.getRuntime().availableProcessors(), + allocatedProcessors, + Constants.OS_NAME, + getPrettyName(), + Constants.OS_ARCH, + Constants.OS_VERSION); + } + + private String getPrettyName() throws IOException { + // TODO: return a prettier name on non-Linux OS + if (Constants.LINUX) { + /* + * We read the lines from /etc/os-release (or /usr/lib/os-release) to extract the PRETTY_NAME. The format of this file is + * newline-separated key-value pairs. The key and value are separated by an equals symbol (=). The value can be unquoted, or + * wrapped in single- or double-quotes.
+ */ + final List<String> etcOsReleaseLines = readOsRelease(); + final List<String> prettyNameLines = + etcOsReleaseLines.stream().filter(line -> line.startsWith("PRETTY_NAME")).collect(Collectors.toList()); + assert prettyNameLines.size() <= 1 : prettyNameLines; + final Optional<String> maybePrettyNameLine = + prettyNameLines.size() == 1 ? Optional.of(prettyNameLines.get(0)) : Optional.empty(); + if (maybePrettyNameLine.isPresent()) { + final String prettyNameLine = maybePrettyNameLine.get(); + final String[] prettyNameFields = prettyNameLine.split("="); + assert prettyNameFields.length == 2 : prettyNameLine; + if (prettyNameFields[1].length() >= 3 && + ((prettyNameFields[1].startsWith("\"") && prettyNameFields[1].endsWith("\"")) || + (prettyNameFields[1].startsWith("'") && prettyNameFields[1].endsWith("'")))) { + return prettyNameFields[1].substring(1, prettyNameFields[1].length() - 1); + } else { + return prettyNameFields[1]; + } + } else { + return Constants.OS_NAME; + } + + } else { + return Constants.OS_NAME; + } + } + + /** + * The lines from {@code /etc/os-release}, or {@code /usr/lib/os-release} as a fallback. These files identify the underlying + * operating system; the file is structured as newline-separated key-value pairs of shell-compatible variable assignments. + * + * @return the lines from {@code /etc/os-release} or {@code /usr/lib/os-release} + * @throws IOException if an I/O exception occurs reading {@code /etc/os-release} or {@code /usr/lib/os-release} + */ + @SuppressForbidden(reason = "access /etc/os-release or /usr/lib/os-release") + List<String> readOsRelease() throws IOException { + final List<String> lines; + if (Files.exists(PathUtils.get("/etc/os-release"))) { + lines = Files.readAllLines(PathUtils.get("/etc/os-release")); + } else { + lines = Files.readAllLines(PathUtils.get("/usr/lib/os-release")); + } + assert lines != null && lines.isEmpty() == false; + return lines; } public OsStats osStats() { diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java index 8812c1f25b766..3727b4dcd1860 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -27,6 +27,8 @@ import org.elasticsearch.common.util.SingleObjectCache; import org.elasticsearch.common.util.concurrent.EsExecutors; +import java.io.IOException; + public class OsService extends AbstractComponent { private final OsProbe probe; @@ -37,7 +39,7 @@ public class OsService extends AbstractComponent { Setting.timeSetting("monitor.os.refresh_interval", TimeValue.timeValueSeconds(1), TimeValue.timeValueSeconds(1), Property.NodeScope); - public OsService(Settings settings) { + public OsService(Settings settings) throws IOException { this.probe = OsProbe.getInstance(); TimeValue refreshInterval = REFRESH_INTERVAL_SETTING.get(settings); this.info = probe.osInfo(refreshInterval.millis(), EsExecutors.numberOfProcessors(settings)); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 6480ef3ffaebc..f3433dfa1ba1a 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -26,7 +26,6 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; import org.elasticsearch.action.Action; import
org.elasticsearch.action.ActionModule; import org.elasticsearch.action.search.SearchExecutionStatsCollector; @@ -279,7 +278,7 @@ protected Node( final JvmInfo jvmInfo = JvmInfo.jvmInfo(); logger.info( "version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", - Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), + Build.CURRENT.getQualifiedVersion(), jvmInfo.pid(), Build.CURRENT.flavor().displayName(), Build.CURRENT.type().displayName(), @@ -293,7 +292,11 @@ protected Node( Constants.JAVA_VERSION, Constants.JVM_VERSION); logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments())); - warnIfPreRelease(Version.CURRENT, Build.CURRENT.isSnapshot(), logger); + if (Build.CURRENT.isProductionRelease() == false) { + logger.warn( + "version [{}] is a pre-release version of Elasticsearch and is not suitable for production", + Build.CURRENT.getQualifiedVersion()); + } if (logger.isDebugEnabled()) { logger.debug("using config [{}], data [{}], logs [{}], plugins [{}]", @@ -490,7 +493,7 @@ protected Node( final List> tasksExecutors = pluginsService .filterPlugins(PersistentTaskPlugin.class).stream() - .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client)) + .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client, settingsModule)) .flatMap(List::stream) .collect(toList()); @@ -577,14 +580,6 @@ protected Node( } } - static void warnIfPreRelease(final Version version, final boolean isSnapshot, final Logger logger) { - if (!version.isRelease() || isSnapshot) { - logger.warn( - "version [{}] is a pre-release version of Elasticsearch and is not suitable for production", - Version.displayVersion(version, isSnapshot)); - } - } - protected TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor interceptor, Function localNodeFactory, diff --git a/server/src/main/java/org/elasticsearch/plugins/PersistentTaskPlugin.java b/server/src/main/java/org/elasticsearch/plugins/PersistentTaskPlugin.java index 5e3319a2bc5cd..7c383f752071a 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PersistentTaskPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/PersistentTaskPlugin.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.threadpool.ThreadPool; @@ -35,7 +36,9 @@ public interface PersistentTaskPlugin { * Returns additional persistent tasks executors added by this plugin. 
*/ default List> getPersistentTasksExecutor(ClusterService clusterService, - ThreadPool threadPool, Client client) { + ThreadPool threadPool, + Client client, + SettingsModule settingsModule) { return Collections.emptyList(); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 0e242bb6d9f78..74c30a9f5d0e1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -73,11 +73,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); String defaultType = request.param("type"); - final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); - if (includeTypeName == false && defaultType != null) { - throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the bulx APIs with the " + - "[_bulk] and [{index}/_bulk] endpoints."); - } if (defaultType == null) { defaultType = MapperService.SINGLE_MAPPING_NAME; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java index 05b60d3d7cbb2..1ff0fdf280489 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestDeleteAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -48,13 +47,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); - final String type = request.param("type"); - if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { - throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the delete API with the " + - "[{index}/_doc/{id}] endpoints."); - } - DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), type, request.param("id")); + DeleteRequest deleteRequest = new DeleteRequest(request.param("index"), + request.param("type"), + request.param("id")); deleteRequest.routing(request.param("routing")); deleteRequest.timeout(request.paramAsTime("timeout", DeleteRequest.DEFAULT_TIMEOUT)); deleteRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java index 8044600d6196e..0a21188f54563 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -56,13 +55,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); - final String type = request.param("type"); - if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { - throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the get APIs with the " + - "[{index}/_doc/{id}] endpoint."); - } - final GetRequest getRequest = new GetRequest(request.param("index"), type, request.param("id")); + final GetRequest getRequest = new GetRequest(request.param("index"), + request.param("type"), + request.param("id")); getRequest.refresh(request.paramAsBoolean("refresh", getRequest.refresh())); getRequest.routing(request.param("routing")); getRequest.preference(request.param("preference")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java index 29cc6e8e028a8..b672445421af8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.VersionType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -51,13 +50,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); - final String type = request.param("type"); - if (includeTypeName == false && MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { - throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the update API with the " + - "[{index}/_doc/{id}/_update] endpoint."); - } - UpdateRequest updateRequest = new UpdateRequest(request.param("index"), type, request.param("id")); + UpdateRequest updateRequest = new UpdateRequest(request.param("index"), + request.param("type"), + request.param("id")); updateRequest.routing(request.param("routing")); updateRequest.timeout(request.paramAsTime("timeout", updateRequest.timeout())); updateRequest.setRefreshPolicy(request.param("refresh")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 3efa9e633de30..0a0bf1b6e38eb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -150,13 +150,8 @@ public static void parseSearchRequest(SearchRequest searchRequest, RestRequest r searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - final boolean includeTypeName = request.paramAsBoolean("include_type_name", true); String types = request.param("type"); if 
(types != null) { - if (includeTypeName == false) { - throw new IllegalArgumentException("You may only use the [include_type_name=false] option with the search API with the " + - "[{index}/_search] endpoint."); - } deprecationLogger.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); } searchRequest.types(Strings.splitStringByCommaToArray(types)); diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index 5c533298cbe13..c88c68fd407a2 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -26,6 +26,8 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.function.DoubleSupplier; @@ -34,6 +36,22 @@ */ public abstract class ScoreScript { + private static final Map DEPRECATIONS; + static { + Map deprecations = new HashMap<>(); + deprecations.put( + "doc", + "Accessing variable [doc] via [params.doc] from within a score script " + + "is deprecated in favor of directly accessing [doc]." + ); + deprecations.put( + "_doc", + "Accessing variable [doc] via [params._doc] from within a score script " + + "is deprecated in favor of directly accessing [doc]." + ); + DEPRECATIONS = Collections.unmodifiableMap(deprecations); + } + public static final String[] PARAMETERS = new String[]{}; /** The generic runtime parameters for the script. */ @@ -45,9 +63,18 @@ public abstract class ScoreScript { private DoubleSupplier scoreSupplier = () -> 0.0; public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { - this.params = params; // null check needed b/c of expression engine subclass - this.leafLookup = lookup == null ? null : lookup.getLeafSearchLookup(leafContext); + if (lookup == null) { + assert params == null; + assert leafContext == null; + this.params = null; + this.leafLookup = null; + } else { + this.leafLookup = lookup.getLeafSearchLookup(leafContext); + params = new HashMap<>(params); + params.putAll(leafLookup.asMap()); + this.params = new ParameterMap(params, DEPRECATIONS); + } } public abstract double execute(); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptStats.java b/server/src/main/java/org/elasticsearch/script/ScriptStats.java index abf54e7e3d0a2..06eec72c0e0da 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptStats.java @@ -42,14 +42,14 @@ public ScriptStats(long compilations, long cacheEvictions, long compilationLimit public ScriptStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); - compilationLimitTriggered = in.getVersion().onOrAfter(Version.V_7_0_0_alpha1) ? in.readVLong() : 0; + compilationLimitTriggered = in.getVersion().onOrAfter(Version.V_7_0_0) ? 
in.readVLong() : 0; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { out.writeVLong(compilationLimitTriggered); } } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index d681a186892db..0b17ec72fbb17 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -154,7 +154,7 @@ final class DefaultSearchContext extends SearchContext { private final Map searchExtBuilders = new HashMap<>(); private final Map, Collector> queryCollectors = new HashMap<>(); private final QueryShardContext queryShardContext; - private FetchPhase fetchPhase; + private final FetchPhase fetchPhase; DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher, ClusterService clusterService, IndexService indexService, @@ -186,7 +186,7 @@ final class DefaultSearchContext extends SearchContext { @Override public void doClose() { - // clear and scope phase we have + // clear and scope phase we have Releasables.close(searcher, engineSearcher); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 71ea55e97a762..7eb1139705dfc 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -429,7 +429,7 @@ public XContentBuilder toInnerXContent(XContentBuilder builder, Params params) t if (index != null) { builder.field(Fields._INDEX, RemoteClusterAware.buildRemoteIndexName(clusterAlias, index)); } - if (type != null && params.paramAsBoolean("include_type_name", true)) { + if (type != null) { builder.field(Fields._TYPE, type); } if (id != null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/search/SearchPhase.java index 33260706b3cb2..72b5f26c97486 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/SearchPhase.java @@ -20,6 +20,7 @@ package org.elasticsearch.search; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.tasks.Task; /** * Represents a phase of a search request e.g. query, fetch etc. 
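The next hunk adds a `SearchContextSourcePrinter` whose `toString()` assembles the shard, source, and task description on demand; routing it through `LOGGER.trace("{}", printer)` means log4j2 only calls `toString()` when the message is actually rendered. Here is a small, self-contained sketch of the idiom, with a hypothetical wrapper class standing in for the real printer:

```java
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class LazyTraceLoggingSketch {

    private static final Logger LOGGER = LogManager.getLogger(LazyTraceLoggingSketch.class);

    // Wrapper whose toString() defers the expensive formatting until log4j
    // actually renders the message, just like SearchContextSourcePrinter.
    static final class ExpensiveDescription {
        private final String source;
        private final String taskId;

        ExpensiveDescription(String source, String taskId) {
            this.source = source;
            this.taskId = taskId;
        }

        @Override
        public String toString() {
            // imagine walking a large search request here
            return "source[" + source + "], id[" + taskId + "], ";
        }
    }

    public static void main(String[] args) {
        // no description string is built unless TRACE is enabled for this logger
        if (LOGGER.isTraceEnabled()) {
            LOGGER.trace("{}", new ExpensiveDescription("{\"query\":{\"match_all\":{}}}", "42"));
        }
    }
}
```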
@@ -35,4 +36,32 @@ public interface SearchPhase { * Executes the search phase */ void execute(SearchContext context); + + class SearchContextSourcePrinter { + private final SearchContext searchContext; + + public SearchContextSourcePrinter(SearchContext searchContext) { + this.searchContext = searchContext; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append(searchContext.indexShard().shardId()); + builder.append(" "); + if (searchContext.request() != null && + searchContext.request().source() != null) { + builder.append("source[").append(searchContext.request().source().toString()).append("], "); + } else { + builder.append("source[], "); + } + if (searchContext.getTask() != null && + searchContext.getTask().getHeader(Task.X_OPAQUE_ID) != null) { + builder.append("id[").append(searchContext.getTask().getHeader(Task.X_OPAQUE_ID)).append("], "); + } else { + builder.append("id[], "); + } + return builder.toString(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 32a94f59be5c2..927534afc852b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -72,7 +71,7 @@ public abstract class CompositeValuesSourceBuilder config = ValuesSourceConfig.resolve(context.getQueryShardContext(), valueType, field, script, null,null, format); - - if (config.unmapped() && field != null && missingBucket == false) { - // this source cannot produce any values so we refuse to build - // since composite buckets are not created on null values by default. 
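A few hunks below, `ValuesSourceConfig.resolveFormat` starts threading the request time zone into `DocValueFormat.DateTime`. As a tiny Joda-based sketch of why the zone matters when rendering date values (the pattern and zones here are arbitrary examples, not taken from the change):

```java
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class DateFormatZoneSketch {

    public static void main(String[] args) {
        long epochMillis = 1546300800000L; // 2019-01-01T00:00:00Z
        DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm");
        // the same instant renders differently once the formatter is anchored to a zone
        System.out.println(format.withZone(DateTimeZone.UTC).print(epochMillis));                        // 2019-01-01 00:00
        System.out.println(format.withZone(DateTimeZone.forID("America/New_York")).print(epochMillis)); // 2018-12-31 19:00
    }
}
```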
- throw new QueryShardException(context.getQueryShardContext(), - "failed to find field [" + field + "] and [missing_bucket] is not set"); - } return innerBuild(context, config); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index d593a2408e67a..c152a5d5bc497 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -21,6 +21,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.joda.Joda; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; @@ -53,7 +54,7 @@ public static ValuesSourceConfig resolve( if (field == null) { if (script == null) { ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSourceType.ANY); - config.format(resolveFormat(null, valueType)); + config.format(resolveFormat(null, valueType, timeZone)); return config; } ValuesSourceType valuesSourceType = valueType != null ? valueType.getValuesSourceType() : ValuesSourceType.ANY; @@ -67,7 +68,7 @@ public static ValuesSourceConfig resolve( ValuesSourceConfig config = new ValuesSourceConfig<>(valuesSourceType); config.missing(missing); config.timezone(timeZone); - config.format(resolveFormat(format, valueType)); + config.format(resolveFormat(format, valueType, timeZone)); config.script(createScript(script, context)); config.scriptValueType(valueType); return config; @@ -79,7 +80,7 @@ public static ValuesSourceConfig resolve( ValuesSourceConfig config = new ValuesSourceConfig<>(valuesSourceType); config.missing(missing); config.timezone(timeZone); - config.format(resolveFormat(format, valueType)); + config.format(resolveFormat(format, valueType, timeZone)); config.unmapped(true); if (valueType != null) { // todo do we really need this for unmapped? @@ -120,7 +121,7 @@ private static AggregationScript.LeafFactory createScript(Script script, QuerySh } } - private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { + private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType, @Nullable DateTimeZone tz) { if (valueType == null) { return DocValueFormat.RAW; // we can't figure it out } @@ -128,6 +129,9 @@ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable V if (valueFormat instanceof DocValueFormat.Decimal && format != null) { valueFormat = new DocValueFormat.Decimal(format); } + if (valueFormat instanceof DocValueFormat.DateTime && format != null) { + valueFormat = new DocValueFormat.DateTime(Joda.forPattern(format), tz != null ? 
tz : DateTimeZone.UTC); + } return valueFormat; } diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 718b895217433..46999e0f6daac 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -129,7 +129,7 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0; out.writeVLong(statistics.maxDoc()); - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { // stats are always positive numbers out.writeVLong(statistics.docCount()); out.writeVLong(statistics.sumTotalTermFreq()); @@ -176,7 +176,7 @@ public static ObjectObjectHashMap readFieldStats(S final long docCount; final long sumTotalTermFreq; final long sumDocFreq; - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { // stats are always positive numbers docCount = in.readVLong(); sumTotalTermFreq = in.readVLong(); diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 1b4cbbbd882bc..d5d081e96972c 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.fetch; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.DocIdSetIterator; @@ -73,6 +75,7 @@ * after reducing all of the matches returned by the query phase */ public class FetchPhase implements SearchPhase { + private static final Logger LOGGER = LogManager.getLogger(FetchPhase.class); private final FetchSubPhase[] fetchSubPhases; @@ -87,6 +90,11 @@ public void preProcess(SearchContext context) { @Override public void execute(SearchContext context) { + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("{}", new SearchContextSourcePrinter(context)); + } + final FieldsVisitor fieldsVisitor; Map> storedToRequestedFields = new HashMap<>(); StoredFieldsContext storedFieldsContext = context.storedFieldsContext(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 04a4629e9a875..8e7c2ef013a43 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -217,4 +217,8 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio public DirectoryReader getDirectoryReader() { return engineSearcher.getDirectoryReader(); } + + public Engine.Searcher getEngineSearcher() { + return engineSearcher; + } } diff --git a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java index 3523966b7eda4..10fc6a648af66 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -19,6 +19,8 @@ package org.elasticsearch.search.query; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import 
org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.MinDocQuery; @@ -73,6 +75,7 @@ * (document ids and score or sort criteria) so that matches can be reduced on the coordinating node */ public class QueryPhase implements SearchPhase { + private static final Logger LOGGER = LogManager.getLogger(QueryPhase.class); private final AggregationPhase aggregationPhase; private final SuggestPhase suggestPhase; @@ -99,6 +102,11 @@ public void execute(SearchContext searchContext) throws QueryPhaseExecutionExcep new DocValueFormat[0]); return; } + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("{}", new SearchContextSourcePrinter(searchContext)); + } + // Pre-process aggregations as late as possible. In the case of a DFS_Q_T_F // request, preProcess is called on the DFS phase phase, this is why we pre-process them // here to make sure it happens during the QUERY phase diff --git a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java index 9b11de93bee8a..40e10eb589006 100644 --- a/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/slice/SliceBuilder.java @@ -260,7 +260,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, if ("_uid".equals(field)) { // on new indices, the _id acts as a _uid field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_7_0_0_alpha1)) { + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_7_0_0)) { throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); } DEPRECATION_LOG.deprecated("Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead"); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java index c957b16502724..b2f2a336d3684 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java @@ -93,7 +93,7 @@ public Suggest(List>> suggestions) public Suggest(StreamInput in) throws IOException { // in older versions, Suggestion types were serialized as Streamable - if (in.getVersion().before(Version.V_7_0_0_alpha1)) { + if (in.getVersion().before(Version.V_7_0_0)) { final int size = in.readVInt(); suggestions = new ArrayList<>(size); for (int i = 0; i < size; i++) { @@ -161,7 +161,7 @@ public boolean hasScoreDocs() { @Override public void writeTo(StreamOutput out) throws IOException { // in older versions, Suggestion types were serialized as Streamable - if (out.getVersion().before(Version.V_7_0_0_alpha1)) { + if (out.getVersion().before(Version.V_7_0_0)) { out.writeVInt(suggestions.size()); for (Suggestion command : suggestions) { out.writeVInt(command.getWriteableType()); @@ -279,7 +279,7 @@ public Suggestion(StreamInput in) throws IOException { size = in.readVInt(); // this is a hack to work around slightly different serialization order of earlier versions of TermSuggestion - if (in.getVersion().before(Version.V_7_0_0_alpha1) && this instanceof TermSuggestion) { + if (in.getVersion().before(Version.V_7_0_0) && this instanceof TermSuggestion) { TermSuggestion t = (TermSuggestion) this; t.setSort(SortBy.readFromStream(in)); } @@ -389,7 +389,7 @@ public void 
writeTo(StreamOutput out) throws IOException { out.writeVInt(size); // this is a hack to work around slightly different serialization order in older versions of TermSuggestion - if (out.getVersion().before(Version.V_7_0_0_alpha1) && this instanceof TermSuggestion) { + if (out.getVersion().before(Version.V_7_0_0) && this instanceof TermSuggestion) { TermSuggestion termSuggestion = (TermSuggestion) this; termSuggestion.getSort().writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java index ae9cf6fc8c2f9..b908fadd55845 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoContextMapping.java @@ -292,7 +292,7 @@ protected void validateReferences(Version indexVersionCreated, Function> innerExecute(String name, P for (int i = 0; i < numGenerators; i++) { PhraseSuggestionContext.DirectCandidateGenerator generator = generators.get(i); DirectSpellChecker directSpellChecker = generator.createDirectSpellChecker(); - Terms terms = MultiFields.getTerms(indexReader, generator.field()); + Terms terms = MultiTerms.getTerms(indexReader, generator.field()); if (terms != null) { gens.add(new DirectCandidateGenerator(directSpellChecker, generator.field(), generator.suggestMode(), indexReader, realWordErrorLikelihood, generator.size(), generator.preFilter(), generator.postFilter(), terms)); } } final String suggestField = suggestion.getField(); - final Terms suggestTerms = MultiFields.getTerms(indexReader, suggestField); + final Terms suggestTerms = MultiTerms.getTerms(indexReader, suggestField); if (gens.size() > 0 && suggestTerms != null) { final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit()); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java index 1bdf1c90d7d09..b13f33f76394b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/WordScorer.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.suggest.phrase; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.MultiTerms; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.util.BytesRef; @@ -45,7 +45,7 @@ public abstract class WordScorer { private final boolean useTotalTermFreq; public WordScorer(IndexReader reader, String field, double realWordLikelyHood, BytesRef separator) throws IOException { - this(reader, MultiFields.getTerms(reader, field), field, realWordLikelyHood, separator); + this(reader, MultiTerms.getTerms(reader, field), field, realWordLikelyHood, separator); } public WordScorer(IndexReader reader, Terms terms, String field, double realWordLikelyHood, BytesRef separator) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java index 1855f1f88e5fb..b8e334f92835c 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java +++ 
b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestion.java @@ -62,7 +62,7 @@ public TermSuggestion(String name, int size, SortBy sort) { public TermSuggestion(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { sort = SortBy.readFromStream(in); } } @@ -137,7 +137,7 @@ protected Comparator