diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 6a7e2e69faebf..ce78ebcc1cae7 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -16,4 +16,9 @@ while [ -h "$SCRIPT" ] ; do done source $(dirname "${SCRIPT}")/java-versions.properties -JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} ./gradlew --parallel resolveAllDependencies composePull +export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} +# We are caching BWC versions too, need these so we can build those +export JAVA8_HOME="${HOME}"/.java/java8 +export JAVA11_HOME="${HOME}"/.java/java11 +export JAVA12_HOME="${HOME}"/.java/java12 +./gradlew --parallel clean pullFixture --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0acdc95c86bbf..2b5e4f2d24d1f 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -183,8 +183,12 @@ if (project != rootProject) { } dependencies { - distribution project(':distribution:archives:zip') - distribution project(':distribution:archives:oss-zip') + distribution project(':distribution:archives:windows-zip') + distribution project(':distribution:archives:oss-windows-zip') + distribution project(':distribution:archives:darwin-tar') + distribution project(':distribution:archives:oss-darwin-tar') + distribution project(':distribution:archives:linux-tar') + distribution project(':distribution:archives:oss-linux-tar') } String localDownloads = "${rootProject.buildDir}/local-downloads" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index e38cb854a10b0..2fffd67215581 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -191,13 +191,21 @@ class ClusterFormationTasks { throw new GradleException("Unknown distribution: 
${distro} in project ${project.path}") } Version version = Version.fromString(elasticsearchVersion) - String group = "downloads.zip" // dummy group, does not matter except for integ-test-zip, it is ignored by the fake ivy repo + String os = getOs() + String classifier = "-${os}-x86_64" + String packaging = os.equals('windows') ? 'zip' : 'tar.gz' String artifactName = 'elasticsearch' if (distro.equals('oss') && Version.fromString(elasticsearchVersion).onOrAfter('6.3.0')) { artifactName += '-oss' } - String snapshotProject = distro == 'oss' ? 'oss-zip' : 'zip' Object dependency + String snapshotProject = "${os}-${os.equals('windows') ? 'zip' : 'tar'}" + if (version.before("7.0.0")) { + snapshotProject = "zip" + } + if (distro.equals("oss")) { + snapshotProject = "oss-" + snapshotProject + } boolean internalBuild = project.hasProperty('bwcVersions') VersionCollection.UnreleasedVersionInfo unreleasedInfo = null if (project.hasProperty('bwcVersions')) { @@ -205,11 +213,16 @@ class ClusterFormationTasks { unreleasedInfo = project.bwcVersions.unreleasedInfo(version) } if (unreleasedInfo != null) { - dependency = project.dependencies.project(path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: snapshotProject) + dependency = project.dependencies.project( + path: ":distribution:bwc:${unreleasedInfo.gradleProjectName}", configuration: snapshotProject) } else if (internalBuild && elasticsearchVersion.equals(VersionProperties.elasticsearch)) { dependency = project.dependencies.project(path: ":distribution:archives:${snapshotProject}") } else { + if (version.before('7.0.0')) { + classifier = "" // for bwc, before we had classifiers + } + // group does not matter as it is not used when we pull from the ivy repo that points to the download service + dependency = "dnm:${artifactName}:${elasticsearchVersion}${classifier}@${packaging}" } project.dependencies.add(configuration.name, 
dependency) } @@ -335,8 +348,15 @@ class ClusterFormationTasks { the elasticsearch source tree then this should be the version of elasticsearch built by the source tree. If it isn't then Bad Things(TM) will happen. */ Task extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) { - from { - project.zipTree(configuration.singleFile) + if (getOs().equals("windows")) { + from { + project.zipTree(configuration.singleFile) + } + } else { + // macos and linux use tar + from { + project.tarTree(project.resources.gzip(configuration.singleFile)) + } } into node.baseDir } @@ -948,4 +968,15 @@ class ClusterFormationTasks { PluginPropertiesExtension extension = pluginProject.extensions.findByName('esplugin') return extension.name } + + /** Find the current OS */ + static String getOs() { + String os = "linux" + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + os = "windows" + } else if (Os.isFamily(Os.FAMILY_MAC)) { + os = "darwin" + } + return os + } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index 9f5364b78a896..8f54d63f4ca14 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -53,10 +53,10 @@ class VagrantTestPlugin implements Plugin { /** All distributions to bring into test VM, whether or not they are used **/ static final List DISTRIBUTIONS = unmodifiableList([ - 'archives:tar', - 'archives:oss-tar', - 'archives:zip', - 'archives:oss-zip', + 'archives:linux-tar', + 'archives:oss-linux-tar', + 'archives:windows-zip', + 'archives:oss-windows-zip', 'packages:rpm', 'packages:oss-rpm', 'packages:deb', diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 
35a7eacf1fde1..3dfccaf435031 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -59,14 +59,17 @@ public void apply(Project project) { disableTaskByType(tasks, JarHellTask.class); Task buildFixture = project.getTasks().create("buildFixture"); + Task pullFixture = project.getTasks().create("pullFixture"); Task preProcessFixture = project.getTasks().create("preProcessFixture"); buildFixture.dependsOn(preProcessFixture); + pullFixture.dependsOn(preProcessFixture); Task postProcessFixture = project.getTasks().create("postProcessFixture"); if (dockerComposeSupported(project) == false) { preProcessFixture.setEnabled(false); postProcessFixture.setEnabled(false); buildFixture.setEnabled(false); + pullFixture.setEnabled(false); return; } @@ -81,7 +84,9 @@ public void apply(Project project) { ); buildFixture.dependsOn(tasks.getByName("composeUp")); + pullFixture.dependsOn(tasks.getByName("composePull")); tasks.getByName("composeUp").mustRunAfter(preProcessFixture); + tasks.getByName("composePull").mustRunAfter(preProcessFixture); postProcessFixture.dependsOn(buildFixture); configureServiceInfoForTask( diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java index df1e5dc01aef5..526db2a86a761 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -46,6 +46,8 @@ static Request putFollow(PutFollowRequest putFollowRequest) throws IOException { .addPathPartAsIs("_ccr", "follow") .build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params parameters = new RequestConverters.Params(request); + 
parameters.withWaitForActiveShards(putFollowRequest.waitForActiveShards()); request.setEntity(createEntity(putFollowRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index c93c9c67e8f7f..2f5bd65fba189 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -49,8 +49,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -61,7 +59,9 @@ import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.GetMappingsResponse; +import org.elasticsearch.client.indices.GetIndexTemplatesResponse; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; +import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.rest.RestStatus; @@ -908,6 +908,7 @@ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, Reques AcknowledgedResponse::fromXContent, listener, emptySet()); } + /** * Puts an index template using the Index 
Templates API. * See Index Templates API @@ -916,9 +917,13 @@ public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, Reques * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response + * @deprecated This old form of request allows types in mappings. Use {@link #putTemplate(PutIndexTemplateRequest, RequestOptions)} + * instead which introduces a new request object without types. */ - public AcknowledgedResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, - RequestOptions options) throws IOException { + @Deprecated + public AcknowledgedResponse putTemplate( + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -930,9 +935,44 @@ public AcknowledgedResponse putTemplate(PutIndexTemplateRequest putIndexTemplate * @param putIndexTemplateRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @deprecated This old form of request allows types in mappings. + * Use {@link #putTemplateAsync(PutIndexTemplateRequest, RequestOptions, ActionListener)} + * instead which introduces a new request object without types. 
*/ - public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, RequestOptions options, - ActionListener listener) { + @Deprecated + public void putTemplateAsync(org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + + /** + * Puts an index template using the Index Templates API. + * See Index Templates API + * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putTemplate( + PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously puts an index template using the Index Templates API. + * See Index Templates API + * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, IndicesRequestConverters::putTemplate, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } @@ -968,33 +1008,74 @@ public void validateQueryAsync(ValidateQueryRequest validateQueryRequest, Reques } /** - * Gets index templates using the Index Templates API + * Gets index templates using the Index Templates API. The mappings will be returned in a legacy deprecated format, where the + * mapping definition is nested under the type name. * See Index Templates API * on elastic.co * @param getIndexTemplatesRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response + * @deprecated This method uses an old response object which still refers to types, a deprecated feature. 
Use + * {@link #getIndexTemplate(GetIndexTemplatesRequest, RequestOptions)} instead which returns a new response object */ - public GetIndexTemplatesResponse getTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest, - RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, - options, GetIndexTemplatesResponse::fromXContent, emptySet()); + @Deprecated + public org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getTemplate( + GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, + IndicesRequestConverters::getTemplatesWithDocumentTypes, + options, org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, emptySet()); } + + /** + * Gets index templates using the Index Templates API + * See Index Templates API + * on elastic.co + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param getIndexTemplatesRequest the request + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetIndexTemplatesResponse getIndexTemplate(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getIndexTemplatesRequest, + IndicesRequestConverters::getTemplates, + options, GetIndexTemplatesResponse::fromXContent, emptySet()); + } /** - * Asynchronously gets index templates using the Index Templates API + * Asynchronously gets index templates using the Index Templates API. The mappings will be returned in a legacy deprecated format, + * where the mapping definition is nested under the type name. 
* See Index Templates API * on elastic.co * @param getIndexTemplatesRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion + * @deprecated This method uses an old response object which still refers to types, a deprecated feature. Use + * {@link #getIndexTemplateAsync(GetIndexTemplatesRequest, RequestOptions, ActionListener)} instead which returns a new response object */ + @Deprecated public void getTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, + IndicesRequestConverters::getTemplatesWithDocumentTypes, + options, org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously gets index templates using the Index Templates API + * See Index Templates API + * on elastic.co + * @param getIndexTemplatesRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getIndexTemplateAsync(GetIndexTemplatesRequest getIndexTemplatesRequest, RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, IndicesRequestConverters::getTemplates, + restHighLevelClient.performRequestAsyncAndParseEntity(getIndexTemplatesRequest, + IndicesRequestConverters::getTemplates, options, GetIndexTemplatesResponse::fromXContent, listener, emptySet()); - } + } /** * Uses the Index Templates API to determine if index templates exist diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 4889ead93b715..13bc2b8c149db 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -43,13 +43,13 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; +import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import 
org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.common.Strings; @@ -416,7 +416,13 @@ static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) thr return request; } - static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { + /** + * @deprecated This uses the old form of PutIndexTemplateRequest which uses types. + * Use (@link {@link #putTemplate(PutIndexTemplateRequest)} instead + */ + @Deprecated + static Request putTemplate(org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putIndexTemplateRequest) + throws IOException { String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template") .addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -433,6 +439,22 @@ static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) thro return request; } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_template") + .addPathPart(putIndexTemplateRequest.name()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(putIndexTemplateRequest.masterNodeTimeout()); + if (putIndexTemplateRequest.create()) { + params.putParam("create", Boolean.TRUE.toString()); + } + if (Strings.hasText(putIndexTemplateRequest.cause())) { + params.putParam("cause", putIndexTemplateRequest.cause()); + } + request.setEntity(RequestConverters.createEntity(putIndexTemplateRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws IOException { String[] indices = validateQueryRequest.indices() == null ? 
Strings.EMPTY_ARRAY : validateQueryRequest.indices(); String[] types = validateQueryRequest.types() == null || indices.length <= 0 ? Strings.EMPTY_ARRAY : validateQueryRequest.types(); @@ -458,7 +480,16 @@ static Request getAlias(GetAliasesRequest getAliasesRequest) { return request; } + @Deprecated + static Request getTemplatesWithDocumentTypes(GetIndexTemplatesRequest getIndexTemplatesRequest) { + return getTemplates(getIndexTemplatesRequest, true); + } + static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) { + return getTemplates(getIndexTemplatesRequest, false); + } + + private static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest, boolean includeTypeName) { final String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_template") .addCommaSeparatedPathParts(getIndexTemplatesRequest.names()) @@ -467,9 +498,11 @@ static Request getTemplates(GetIndexTemplatesRequest getIndexTemplatesRequest) { final RequestConverters.Params params = new RequestConverters.Params(request); params.withLocal(getIndexTemplatesRequest.isLocal()); params.withMasterTimeout(getIndexTemplatesRequest.getMasterNodeTimeout()); - params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); + if (includeTypeName) { + params.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); + } return request; - } + } static Request templatesExist(IndexTemplatesExistRequest indexTemplatesExistRequest) { final String endpoint = new RequestConverters.EndpointBuilder() diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 6b0a5d2642f02..073b92f84a3a3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -64,6 +64,7 @@ import org.elasticsearch.client.ml.PutFilterRequest; import 
org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; +import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.UpdateDatafeedRequest; @@ -624,6 +625,17 @@ static Request deleteFilter(DeleteFilterRequest deleteFilterRequest) { return request; } + static Request setUpgradeMode(SetUpgradeModeRequest setUpgradeModeRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_ml", "set_upgrade_mode").build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.putParam(SetUpgradeModeRequest.ENABLED.getPreferredName(), Boolean.toString(setUpgradeModeRequest.isEnabled())); + if (setUpgradeModeRequest.getTimeout() != null) { + params.putParam(SetUpgradeModeRequest.TIMEOUT.getPreferredName(), setUpgradeModeRequest.getTimeout().toString()); + } + return request; + } + static Request mlInfo(MlInfoRequest infoRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_ml", "info") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index aaff35a238998..2e359931c1025 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -86,6 +86,7 @@ import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.RevertModelSnapshotResponse; +import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; 
import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -1838,4 +1839,42 @@ public void findFileStructureAsync(FindFileStructureRequest request, RequestOpti listener, Collections.emptySet()); } + + /** + * Sets the ML cluster setting upgrade_mode + *

+ * For additional info + * see Set Upgrade Mode + * + * @param request The request to set upgrade mode + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return response + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public AcknowledgedResponse setUpgradeMode(SetUpgradeModeRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::setUpgradeMode, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Sets the ML cluster setting upgrade_mode asynchronously + *

+ * For additional info + * see Set Upgrade Mode + * + * @param request The request of Machine Learning info + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void setUpgradeModeAsync(SetUpgradeModeRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::setUpgradeMode, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java index 98e9d224564cf..8307b04bd7087 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.ccr; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.Validatable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -36,11 +37,17 @@ public final class PutFollowRequest extends FollowConfig implements Validatable, private final String remoteCluster; private final String leaderIndex; private final String followerIndex; + private final ActiveShardCount waitForActiveShards; public PutFollowRequest(String remoteCluster, String leaderIndex, String followerIndex) { + this(remoteCluster, leaderIndex, followerIndex, ActiveShardCount.NONE); + } + + public PutFollowRequest(String remoteCluster, String leaderIndex, String followerIndex, ActiveShardCount waitForActiveShards) { this.remoteCluster = Objects.requireNonNull(remoteCluster, "remoteCluster"); this.leaderIndex = 
Objects.requireNonNull(leaderIndex, "leaderIndex"); this.followerIndex = Objects.requireNonNull(followerIndex, "followerIndex"); + this.waitForActiveShards = waitForActiveShards; } @Override @@ -66,13 +73,18 @@ public String getFollowerIndex() { return followerIndex; } + public ActiveShardCount waitForActiveShards() { + return waitForActiveShards; + } + @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; if (!super.equals(o)) return false; PutFollowRequest that = (PutFollowRequest) o; - return Objects.equals(remoteCluster, that.remoteCluster) && + return Objects.equals(waitForActiveShards, that.waitForActiveShards) && + Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(leaderIndex, that.leaderIndex) && Objects.equals(followerIndex, that.followerIndex); } @@ -83,7 +95,7 @@ public int hashCode() { super.hashCode(), remoteCluster, leaderIndex, - followerIndex - ); + followerIndex, + waitForActiveShards); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetIndexTemplatesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetIndexTemplatesResponse.java new file mode 100644 index 0000000000000..ef283b31c0634 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetIndexTemplatesResponse.java @@ -0,0 +1,90 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + + +public class GetIndexTemplatesResponse { + + @Override + public String toString() { + List thisList = new ArrayList<>(this.indexTemplates); + thisList.sort(Comparator.comparing(IndexTemplateMetaData::name)); + return "GetIndexTemplatesResponse [indexTemplates=" + thisList + "]"; + } + + private final List indexTemplates; + + GetIndexTemplatesResponse() { + indexTemplates = new ArrayList<>(); + } + + GetIndexTemplatesResponse(List indexTemplates) { + this.indexTemplates = indexTemplates; + } + + public List getIndexTemplates() { + return indexTemplates; + } + + + public static GetIndexTemplatesResponse fromXContent(XContentParser parser) throws IOException { + final List templates = new ArrayList<>(); + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + final IndexTemplateMetaData templateMetaData = IndexTemplateMetaData.Builder.fromXContent(parser, parser.currentName()); + templates.add(templateMetaData); + } + } + return new GetIndexTemplatesResponse(templates); + } + + @Override + public int hashCode() { + List sortedList = new ArrayList<>(this.indexTemplates); + sortedList.sort(Comparator.comparing(IndexTemplateMetaData::name)); + return Objects.hash(sortedList); + } 
+ + @Override + public boolean equals(Object obj) { + if (this == obj) + return true; + if (obj == null) + return false; + if (getClass() != obj.getClass()) + return false; + // To compare results we need to make sure the templates are listed in the same order + GetIndexTemplatesResponse other = (GetIndexTemplatesResponse) obj; + List thisList = new ArrayList<>(this.indexTemplates); + List otherList = new ArrayList<>(other.indexTemplates); + thisList.sort(Comparator.comparing(IndexTemplateMetaData::name)); + otherList.sort(Comparator.comparing(IndexTemplateMetaData::name)); + return Objects.equals(thisList, otherList); + } + + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/IndexTemplateMetaData.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/IndexTemplateMetaData.java new file mode 100644 index 0000000000000..12fc747ab3473 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/IndexTemplateMetaData.java @@ -0,0 +1,300 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.MapperService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +public class IndexTemplateMetaData { + + + private final String name; + + private final int order; + + /** + * The version is an arbitrary number managed by the user so that they can easily and quickly verify the existence of a given template. + * Expected usage: + *


+     * PUT /_template/my_template
+     * {
+     *   "index_patterns": ["my_index-*"],
+     *   "mappings": { ... },
+     *   "version": 1
+     * }
+     * 
+ * Then, some process from the user can occasionally verify that the template exists with the appropriate version without having to + * check the template's content: + *

+     * GET /_template/my_template?filter_path=*.version
+     * 
+ */ + @Nullable + private final Integer version; + + private final List patterns; + + private final Settings settings; + + private final MappingMetaData mappings; + + private final ImmutableOpenMap aliases; + + public IndexTemplateMetaData(String name, int order, Integer version, + List patterns, Settings settings, + MappingMetaData mappings, + ImmutableOpenMap aliases) { + if (patterns == null || patterns.isEmpty()) { + throw new IllegalArgumentException("Index patterns must not be null or empty; got " + patterns); + } + this.name = name; + this.order = order; + this.version = version; + this.patterns= patterns; + this.settings = settings; + this.mappings = mappings; + this.aliases = aliases; + } + + public String name() { + return this.name; + } + + public int order() { + return this.order; + } + + @Nullable + public Integer version() { + return version; + } + + public List patterns() { + return this.patterns; + } + + public Settings settings() { + return this.settings; + } + + public MappingMetaData mappings() { + return this.mappings; + } + + public ImmutableOpenMap aliases() { + return this.aliases; + } + + public static Builder builder(String name) { + return new Builder(name); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexTemplateMetaData that = (IndexTemplateMetaData) o; + + if (order != that.order) return false; + if (!Objects.equals(mappings, that.mappings)) return false; + if (!name.equals(that.name)) return false; + if (!settings.equals(that.settings)) return false; + if (!patterns.equals(that.patterns)) return false; + + return Objects.equals(version, that.version); + } + + @Override + public int hashCode() { + return Objects.hash(name, order, version, patterns, settings, mappings); + } + + public static class Builder { + + private static final Set VALID_FIELDS = Sets.newHashSet( + "template", "order", "mappings", "settings", "index_patterns", 
"aliases", "version"); + + private String name; + + private int order; + + private Integer version; + + private List indexPatterns; + + private Settings settings = Settings.Builder.EMPTY_SETTINGS; + + private MappingMetaData mappings; + + private final ImmutableOpenMap.Builder aliases; + + public Builder(String name) { + this.name = name; + mappings = null; + aliases = ImmutableOpenMap.builder(); + } + + public Builder(IndexTemplateMetaData indexTemplateMetaData) { + this.name = indexTemplateMetaData.name(); + order(indexTemplateMetaData.order()); + version(indexTemplateMetaData.version()); + patterns(indexTemplateMetaData.patterns()); + settings(indexTemplateMetaData.settings()); + + mappings = indexTemplateMetaData.mappings(); + aliases = ImmutableOpenMap.builder(indexTemplateMetaData.aliases()); + } + + public Builder order(int order) { + this.order = order; + return this; + } + + public Builder version(Integer version) { + this.version = version; + return this; + } + + public Builder patterns(List indexPatterns) { + this.indexPatterns = indexPatterns; + return this; + } + + + public Builder settings(Settings.Builder settings) { + this.settings = settings.build(); + return this; + } + + public Builder settings(Settings settings) { + this.settings = settings; + return this; + } + + public Builder mapping(MappingMetaData mappings) { + this.mappings = mappings; + return this; + } + + public Builder putAlias(AliasMetaData aliasMetaData) { + aliases.put(aliasMetaData.alias(), aliasMetaData); + return this; + } + + public Builder putAlias(AliasMetaData.Builder aliasMetaData) { + aliases.put(aliasMetaData.alias(), aliasMetaData.build()); + return this; + } + + public IndexTemplateMetaData build() { + return new IndexTemplateMetaData(name, order, version, indexPatterns, settings, mappings, aliases.build()); + } + + + public static IndexTemplateMetaData fromXContent(XContentParser parser, String templateName) throws IOException { + Builder builder = new 
Builder(templateName); + + String currentFieldName = skipTemplateName(parser); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("settings".equals(currentFieldName)) { + Settings.Builder templateSettingsBuilder = Settings.builder(); + templateSettingsBuilder.put(Settings.fromXContent(parser)); + templateSettingsBuilder.normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX); + builder.settings(templateSettingsBuilder.build()); + } else if ("mappings".equals(currentFieldName)) { + Map mapping = parser.map(); + if (mapping.isEmpty() == false) { + MappingMetaData md = new MappingMetaData(MapperService.SINGLE_MAPPING_NAME, mapping); + builder.mapping(md); + } + } else if ("aliases".equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + builder.putAlias(AliasMetaData.Builder.fromXContent(parser)); + } + } else { + throw new ElasticsearchParseException("unknown key [{}] for index template", currentFieldName); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("mappings".equals(currentFieldName)) { + // The server-side IndexTemplateMetaData has toXContent impl that can return mappings + // in an array but also a comment saying this never happens with typeless APIs. 
+ throw new ElasticsearchParseException("Invalid response format - " + + "mappings are not expected to be returned in an array", currentFieldName); + } else if ("index_patterns".equals(currentFieldName)) { + List index_patterns = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + index_patterns.add(parser.text()); + } + builder.patterns(index_patterns); + } + } else if (token.isValue()) { + // Prior to 5.1.0, elasticsearch only supported a single index pattern called `template` (#21009) + if("template".equals(currentFieldName)) { + builder.patterns(Collections.singletonList(parser.text())); + } else if ("order".equals(currentFieldName)) { + builder.order(parser.intValue()); + } else if ("version".equals(currentFieldName)) { + builder.version(parser.intValue()); + } + } + } + return builder.build(); + } + + private static String skipTemplateName(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = parser.nextToken(); + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if (VALID_FIELDS.contains(currentFieldName)) { + return currentFieldName; + } else { + // we just hit the template name, which should be ignored and we move on + parser.nextToken(); + } + } + } + + return null; + } + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java new file mode 100644 index 0000000000000..5f22691b046eb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/PutIndexTemplateRequest.java @@ -0,0 +1,453 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; + +import 
java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; + +/** + * A request to create an index template. + */ +public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContent { + + private String name; + + private String cause = ""; + + private List indexPatterns; + + private int order; + + private boolean create; + + private Settings settings = EMPTY_SETTINGS; + + private BytesReference mappings = null; + + private final Set aliases = new HashSet<>(); + + private Integer version; + + /** + * Constructs a new put index template request with the provided name. + */ + public PutIndexTemplateRequest(String name) { + this.name(name); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (indexPatterns == null || indexPatterns.size() == 0) { + validationException = addValidationError("index patterns are missing", validationException); + } + return validationException; + } + + /** + * Sets the name of the index template. + */ + public PutIndexTemplateRequest name(String name) { + if(name == null) { + throw new IllegalArgumentException("Name cannot be null"); + } + this.name = name; + return this; + } + + /** + * The name of the index template. 
+ */ + public String name() { + return this.name; + } + + public PutIndexTemplateRequest patterns(List indexPatterns) { + this.indexPatterns = indexPatterns; + return this; + } + + public List patterns() { + return this.indexPatterns; + } + + public PutIndexTemplateRequest order(int order) { + this.order = order; + return this; + } + + public int order() { + return this.order; + } + + public PutIndexTemplateRequest version(Integer version) { + this.version = version; + return this; + } + + public Integer version() { + return this.version; + } + + /** + * Set to {@code true} to force only creation, not an update of an index template. If it already + * exists, it will fail with an {@link IllegalArgumentException}. + */ + public PutIndexTemplateRequest create(boolean create) { + this.create = create; + return this; + } + + public boolean create() { + return create; + } + + /** + * The settings to create the index template with. + */ + public PutIndexTemplateRequest settings(Settings settings) { + this.settings = settings; + return this; + } + + /** + * The settings to create the index template with. + */ + public PutIndexTemplateRequest settings(Settings.Builder settings) { + this.settings = settings.build(); + return this; + } + + /** + * The settings to create the index template with (either json/yaml format). + */ + public PutIndexTemplateRequest settings(String source, XContentType xContentType) { + this.settings = Settings.builder().loadFromSource(source, xContentType).build(); + return this; + } + + /** + * The settings to create the index template with (either json or yaml format). 
+ */ + public PutIndexTemplateRequest settings(Map source) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(source); + settings(Strings.toString(builder), XContentType.JSON); + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + return this; + } + + public Settings settings() { + return this.settings; + } + + /** + * Adds mapping that will be added when the index gets created. + * + * @param source The mapping source + * @param xContentType The type of content contained within the source + */ + public PutIndexTemplateRequest mapping(String source, XContentType xContentType) { + internalMapping(XContentHelper.convertToMap(new BytesArray(source), true, xContentType).v2()); + return this; + } + + /** + * The cause for this index template creation. + */ + public PutIndexTemplateRequest cause(String cause) { + this.cause = cause; + return this; + } + + public String cause() { + return this.cause; + } + + /** + * Adds mapping that will be added when the index gets created. + * + * @param source The mapping source + */ + public PutIndexTemplateRequest mapping(XContentBuilder source) { + internalMapping(XContentHelper.convertToMap(BytesReference.bytes(source), + true, source.contentType()).v2()); + return this; + } + + /** + * Adds mapping that will be added when the index gets created. + * + * @param source The mapping source + * @param xContentType the source content type + */ + public PutIndexTemplateRequest mapping(BytesReference source, XContentType xContentType) { + internalMapping(XContentHelper.convertToMap(source, true, xContentType).v2()); + return this; + } + + /** + * Adds mapping that will be added when the index gets created. 
+ * + * @param source The mapping source + */ + public PutIndexTemplateRequest mapping(Map source) { + return internalMapping(source); + } + + private PutIndexTemplateRequest internalMapping(Map source) { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); + builder.map(source); + Objects.requireNonNull(builder.contentType()); + try { + mappings = new BytesArray( + XContentHelper.convertToJson(BytesReference.bytes(builder), false, false, builder.contentType())); + return this; + } catch (IOException e) { + throw new UncheckedIOException("failed to convert source to json", e); + } + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + } + + public BytesReference mappings() { + return this.mappings; + } + + /** + * The template source definition. + */ + public PutIndexTemplateRequest source(XContentBuilder templateBuilder) { + try { + return source(BytesReference.bytes(templateBuilder), templateBuilder.contentType()); + } catch (Exception e) { + throw new IllegalArgumentException("Failed to build json for template request", e); + } + } + + /** + * The template source definition. 
+ */ + @SuppressWarnings("unchecked") + public PutIndexTemplateRequest source(Map templateSource) { + Map source = templateSource; + for (Map.Entry entry : source.entrySet()) { + String name = entry.getKey(); + if (name.equals("template")) { + if(entry.getValue() instanceof String) { + patterns(Collections.singletonList((String) entry.getValue())); + } + } else if (name.equals("index_patterns")) { + if(entry.getValue() instanceof String) { + patterns(Collections.singletonList((String) entry.getValue())); + } else if (entry.getValue() instanceof List) { + List elements = ((List) entry.getValue()).stream().map(Object::toString).collect(Collectors.toList()); + patterns(elements); + } else { + throw new IllegalArgumentException("Malformed [template] value, should be a string or a list of strings"); + } + } else if (name.equals("order")) { + order(XContentMapValues.nodeIntegerValue(entry.getValue(), order())); + } else if ("version".equals(name)) { + if ((entry.getValue() instanceof Integer) == false) { + throw new IllegalArgumentException("Malformed [version] value, should be an integer"); + } + version((Integer)entry.getValue()); + } else if (name.equals("settings")) { + if ((entry.getValue() instanceof Map) == false) { + throw new IllegalArgumentException("Malformed [settings] section, should include an inner object"); + } + settings((Map) entry.getValue()); + } else if (name.equals("mappings")) { + Map mappings = (Map) entry.getValue(); + mapping(mappings); + } else if (name.equals("aliases")) { + aliases((Map) entry.getValue()); + } else { + throw new ElasticsearchParseException("unknown key [{}] in the template ", name); + } + } + return this; + } + + /** + * The template source definition. + */ + public PutIndexTemplateRequest source(String templateSource, XContentType xContentType) { + return source(XContentHelper.convertToMap(xContentType.xContent(), templateSource, true)); + } + + /** + * The template source definition. 
+ */ + public PutIndexTemplateRequest source(byte[] source, XContentType xContentType) { + return source(source, 0, source.length, xContentType); + } + + /** + * The template source definition. + */ + public PutIndexTemplateRequest source(byte[] source, int offset, int length, XContentType xContentType) { + return source(new BytesArray(source, offset, length), xContentType); + } + + /** + * The template source definition. + */ + public PutIndexTemplateRequest source(BytesReference source, XContentType xContentType) { + return source(XContentHelper.convertToMap(source, true, xContentType).v2()); + } + + + public Set aliases() { + return this.aliases; + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public PutIndexTemplateRequest aliases(Map source) { + try { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.map(source); + return aliases(BytesReference.bytes(builder)); + } catch (IOException e) { + throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); + } + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public PutIndexTemplateRequest aliases(XContentBuilder source) { + return aliases(BytesReference.bytes(source)); + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public PutIndexTemplateRequest aliases(String source) { + return aliases(new BytesArray(source)); + } + + /** + * Sets the aliases that will be associated with the index when it gets created + */ + public PutIndexTemplateRequest aliases(BytesReference source) { + // EMPTY is safe here because we never call namedObject + try (XContentParser parser = XContentHelper + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, source)) { + //move to the first alias + parser.nextToken(); + while ((parser.nextToken()) != XContentParser.Token.END_OBJECT) { + 
alias(Alias.fromXContent(parser)); + } + return this; + } catch(IOException e) { + throw new ElasticsearchParseException("Failed to parse aliases", e); + } + } + + /** + * Adds an alias that will be added when the index gets created. + * + * @param alias The metadata for the new alias + * @return the index template creation request + */ + public PutIndexTemplateRequest alias(Alias alias) { + aliases.add(alias); + return this; + } + + @Override + public String[] indices() { + return indexPatterns.toArray(new String[indexPatterns.size()]); + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictExpand(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("index_patterns", indexPatterns); + builder.field("order", order); + if (version != null) { + builder.field("version", version); + } + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + if (mappings != null) { + builder.field("mappings"); + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, mappings.utf8ToString())) { + builder.copyCurrentStructure(parser); + } + } + + builder.startObject("aliases"); + for (Alias alias : aliases) { + alias.toXContent(builder, params); + } + builder.endObject(); + + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/SetUpgradeModeRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/SetUpgradeModeRequest.java new file mode 100644 index 0000000000000..64e94f0251785 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/SetUpgradeModeRequest.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Objects; + +/** + * Sets ML into upgrade_mode + */ +public class SetUpgradeModeRequest extends ActionRequest { + + + public static final ParseField ENABLED = new ParseField("enabled"); + public static final ParseField TIMEOUT = new ParseField("timeout"); + + private boolean enabled; + private TimeValue timeout; + + /** + * Create a new request + * + * @param enabled whether to enable `upgrade_mode` or not + */ + public SetUpgradeModeRequest(boolean enabled) { + this.enabled = enabled; + } + + public boolean isEnabled() { + return enabled; + } + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public TimeValue getTimeout() { + return timeout; + } + + /** + * How long to wait for the request to be completed + * + * @param timeout default value of 30 seconds + */ + public void setTimeout(TimeValue timeout) { + this.timeout = timeout; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return 
Objects.hash(enabled, timeout); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + SetUpgradeModeRequest that = (SetUpgradeModeRequest) other; + return Objects.equals(enabled, that.enabled) && Objects.equals(timeout, that.timeout); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index 97a379aa16a90..ee2685dee6d92 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ccr.CcrStatsRequest; import org.elasticsearch.client.ccr.CcrStatsResponse; @@ -95,7 +96,7 @@ public void testIndexFollowing() throws Exception { CreateIndexResponse response = highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT); assertThat(response.isAcknowledged(), is(true)); - PutFollowRequest putFollowRequest = new PutFollowRequest("local_cluster", "leader", "follower"); + PutFollowRequest putFollowRequest = new PutFollowRequest("local_cluster", "leader", "follower", ActiveShardCount.ONE); PutFollowResponse putFollowResponse = execute(putFollowRequest, ccrClient::putFollow, ccrClient::putFollowAsync); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 708cb6687ce9e..306929d78a67a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -54,8 +54,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; import org.elasticsearch.action.index.IndexRequest; @@ -72,12 +70,14 @@ import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.GetMappingsResponse; +import org.elasticsearch.client.indices.GetIndexTemplatesResponse; +import org.elasticsearch.client.indices.IndexTemplateMetaData; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; +import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Setting; @@ -86,6 +86,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; @@ -97,6 +98,8 @@ import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetMappingAction; import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; +import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import java.io.IOException; import java.util.Arrays; @@ -1400,8 +1403,9 @@ public void testIndexPutSettingNonExistent() throws IOException { } @SuppressWarnings("unchecked") - public void testPutTemplate() throws Exception { - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest() + public void testPutTemplateWithTypes() throws Exception { + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putTemplateRequest = + new org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest() .name("my-template") .patterns(Arrays.asList("pattern-1", "name-*")) .order(10) @@ -1411,7 +1415,9 @@ public void testPutTemplate() throws Exception { .alias(new Alias("alias-1").indexRouting("abc")).alias(new Alias("{index}-write").searchRouting("xyz")); AcknowledgedResponse putTemplateResponse = execute(putTemplateRequest, - highLevelClient().indices()::putTemplate, highLevelClient().indices()::putTemplateAsync); + highLevelClient().indices()::putTemplate, highLevelClient().indices()::putTemplateAsync, + expectWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE) + ); assertThat(putTemplateResponse.isAcknowledged(), equalTo(true)); Map templates = getAsMap("/_template/my-template"); @@ -1425,6 +1431,94 @@ public void testPutTemplate() throws 
Exception { assertThat((Map) extractValue("my-template.aliases.alias-1", templates), hasEntry("index_routing", "abc")); assertThat((Map) extractValue("my-template.aliases.{index}-write", templates), hasEntry("search_routing", "xyz")); } + + @SuppressWarnings("unchecked") + public void testPutTemplate() throws Exception { + PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest("my-template") + .patterns(Arrays.asList("pattern-1", "name-*")) + .order(10) + .create(randomBoolean()) + .settings(Settings.builder().put("number_of_shards", "3").put("number_of_replicas", "0")) + .mapping("{ \"properties\":{" + + "\"host_name\": {\"type\":\"keyword\"}" + + "}" + + "}", XContentType.JSON) + .alias(new Alias("alias-1").indexRouting("abc")).alias(new Alias("{index}-write").searchRouting("xyz")); + + AcknowledgedResponse putTemplateResponse = execute(putTemplateRequest, + highLevelClient().indices()::putTemplate, highLevelClient().indices()::putTemplateAsync); + assertThat(putTemplateResponse.isAcknowledged(), equalTo(true)); + + Map templates = getAsMap("/_template/my-template"); + assertThat(templates.keySet(), hasSize(1)); + assertThat(extractValue("my-template.order", templates), equalTo(10)); + assertThat(extractRawValues("my-template.index_patterns", templates), contains("pattern-1", "name-*")); + assertThat(extractValue("my-template.settings.index.number_of_shards", templates), equalTo("3")); + assertThat(extractValue("my-template.settings.index.number_of_replicas", templates), equalTo("0")); + assertThat(extractValue("my-template.mappings.properties.host_name.type", templates), equalTo("keyword")); + assertThat((Map) extractValue("my-template.aliases.alias-1", templates), hasEntry("index_routing", "abc")); + assertThat((Map) extractValue("my-template.aliases.{index}-write", templates), hasEntry("search_routing", "xyz")); + } + + public void testPutTemplateWithTypesUsingUntypedAPI() throws Exception { + PutIndexTemplateRequest putTemplateRequest = new 
PutIndexTemplateRequest("my-template") + .patterns(Arrays.asList("pattern-1", "name-*")) + .order(10) + .create(randomBoolean()) + .settings(Settings.builder().put("number_of_shards", "3").put("number_of_replicas", "0")) + .mapping("{ " + + "\"my_doc_type\":{" + + "\"properties\":{" + + "\"host_name\": {\"type\":\"keyword\"}" + + "}" + + "}" + + "}", XContentType.JSON) + .alias(new Alias("alias-1").indexRouting("abc")).alias(new Alias("{index}-write").searchRouting("xyz")); + + + ElasticsearchStatusException badMappingError = expectThrows(ElasticsearchStatusException.class, + () -> execute(putTemplateRequest, + highLevelClient().indices()::putTemplate, highLevelClient().indices()::putTemplateAsync)); + assertThat(badMappingError.getDetailedMessage(), + containsString("Root mapping definition has unsupported parameters: [my_doc_type")); + } + + @SuppressWarnings("unchecked") + public void testPutTemplateWithNoTypesUsingTypedApi() throws Exception { + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putTemplateRequest = + new org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest() + .name("my-template") + .patterns(Arrays.asList("pattern-1", "name-*")) + .order(10) + .create(randomBoolean()) + .settings(Settings.builder().put("number_of_shards", "3").put("number_of_replicas", "0")) + .mapping("my_doc_type", + // Note that the declared type is missing from the mapping + "{ " + + "\"properties\":{" + + "\"host_name\": {\"type\":\"keyword\"}," + + "\"description\": {\"type\":\"text\"}" + + "}" + + "}", XContentType.JSON) + .alias(new Alias("alias-1").indexRouting("abc")).alias(new Alias("{index}-write").searchRouting("xyz")); + + AcknowledgedResponse putTemplateResponse = execute(putTemplateRequest, + highLevelClient().indices()::putTemplate, highLevelClient().indices()::putTemplateAsync, + expectWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE) + ); + assertThat(putTemplateResponse.isAcknowledged(), 
equalTo(true)); + + Map templates = getAsMap("/_template/my-template"); + assertThat(templates.keySet(), hasSize(1)); + assertThat(extractValue("my-template.order", templates), equalTo(10)); + assertThat(extractRawValues("my-template.index_patterns", templates), contains("pattern-1", "name-*")); + assertThat(extractValue("my-template.settings.index.number_of_shards", templates), equalTo("3")); + assertThat(extractValue("my-template.settings.index.number_of_replicas", templates), equalTo("0")); + assertThat(extractValue("my-template.mappings.properties.host_name.type", templates), equalTo("keyword")); + assertThat(extractValue("my-template.mappings.properties.description.type", templates), equalTo("text")); + assertThat((Map) extractValue("my-template.aliases.alias-1", templates), hasEntry("index_routing", "abc")); + assertThat((Map) extractValue("my-template.aliases.{index}-write", templates), hasEntry("search_routing", "xyz")); + } public void testPutTemplateBadRequests() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -1489,68 +1583,168 @@ public void testInvalidValidateQuery() throws IOException{ assertFalse(response.isValid()); } + // Tests the deprecated form of the API that returns templates with doc types (using the server-side's GetIndexTemplateResponse) + public void testCRUDIndexTemplateWithTypes() throws Exception { + RestHighLevelClient client = highLevelClient(); + + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putTemplate1 = + new org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest().name("template-1") + .patterns(Arrays.asList("pattern-1", "name-1")).alias(new Alias("alias-1")); + assertThat(execute(putTemplate1, client.indices()::putTemplate, client.indices()::putTemplateAsync + , expectWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)) + .isAcknowledged(), equalTo(true)); + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest 
putTemplate2 = + new org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest().name("template-2") + .patterns(Arrays.asList("pattern-2", "name-2")) + .mapping("custom_doc_type", "name", "type=text") + .settings(Settings.builder().put("number_of_shards", "2").put("number_of_replicas", "0")); + assertThat(execute(putTemplate2, client.indices()::putTemplate, client.indices()::putTemplateAsync, + expectWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)) + .isAcknowledged(), equalTo(true)); + + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getTemplate1 = execute( + new GetIndexTemplatesRequest("template-1"), + client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); + assertThat(getTemplate1.getIndexTemplates(), hasSize(1)); + org.elasticsearch.cluster.metadata.IndexTemplateMetaData template1 = getTemplate1.getIndexTemplates().get(0); + assertThat(template1.name(), equalTo("template-1")); + assertThat(template1.patterns(), contains("pattern-1", "name-1")); + assertTrue(template1.aliases().containsKey("alias-1")); + + //Check the typed version of the call + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getTemplate2 = + execute(new GetIndexTemplatesRequest("template-2"), + client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); + assertThat(getTemplate2.getIndexTemplates(), hasSize(1)); + org.elasticsearch.cluster.metadata.IndexTemplateMetaData template2 = getTemplate2.getIndexTemplates().get(0); + assertThat(template2.name(), equalTo("template-2")); + assertThat(template2.patterns(), contains("pattern-2", "name-2")); + assertTrue(template2.aliases().isEmpty()); + assertThat(template2.settings().get("index.number_of_shards"), equalTo("2")); + assertThat(template2.settings().get("index.number_of_replicas"), 
equalTo("0")); + // Ugly deprecated form of API requires use of doc type to get at mapping object which is CompressedXContent + assertTrue(template2.mappings().containsKey("custom_doc_type")); + + List names = randomBoolean() + ? Arrays.asList("*-1", "template-2") + : Arrays.asList("template-*"); + GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names); + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getBoth = execute( + getBothRequest, client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); + assertThat(getBoth.getIndexTemplates(), hasSize(2)); + assertThat(getBoth.getIndexTemplates().stream().map(org.elasticsearch.cluster.metadata.IndexTemplateMetaData::getName).toArray(), + arrayContainingInAnyOrder("template-1", "template-2")); + + GetIndexTemplatesRequest getAllRequest = new GetIndexTemplatesRequest(); + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse getAll = execute( + getAllRequest, client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); + assertThat(getAll.getIndexTemplates().size(), greaterThanOrEqualTo(2)); + assertThat(getAll.getIndexTemplates().stream().map(org.elasticsearch.cluster.metadata.IndexTemplateMetaData::getName) + .collect(Collectors.toList()), + hasItems("template-1", "template-2")); + + assertTrue(execute(new DeleteIndexTemplateRequest("template-1"), + client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync).isAcknowledged()); + assertThat(expectThrows(ElasticsearchException.class, () -> execute(new GetIndexTemplatesRequest("template-1"), + client.indices()::getTemplate, client.indices()::getTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); + assertThat(expectThrows(ElasticsearchException.class, () -> execute(new DeleteIndexTemplateRequest("template-1"), + 
client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); + + assertThat(execute(new GetIndexTemplatesRequest("template-*"), + client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)).getIndexTemplates(), hasSize(1)); + assertThat(execute(new GetIndexTemplatesRequest("template-*"), + client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)).getIndexTemplates() + .get(0).name(), equalTo("template-2")); + + assertTrue(execute(new DeleteIndexTemplateRequest("template-*"), + client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync).isAcknowledged()); + assertThat(expectThrows(ElasticsearchException.class, () -> execute(new GetIndexTemplatesRequest("template-*"), + client.indices()::getTemplate, client.indices()::getTemplateAsync, + expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE))).status(), equalTo(RestStatus.NOT_FOUND)); + } + + public void testCRUDIndexTemplate() throws Exception { RestHighLevelClient client = highLevelClient(); - PutIndexTemplateRequest putTemplate1 = new PutIndexTemplateRequest().name("template-1") + PutIndexTemplateRequest putTemplate1 = new PutIndexTemplateRequest("template-1") .patterns(Arrays.asList("pattern-1", "name-1")).alias(new Alias("alias-1")); assertThat(execute(putTemplate1, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged(), equalTo(true)); - PutIndexTemplateRequest putTemplate2 = new PutIndexTemplateRequest().name("template-2") + PutIndexTemplateRequest putTemplate2 = new PutIndexTemplateRequest("template-2") .patterns(Arrays.asList("pattern-2", "name-2")) + .mapping("{\"properties\": { \"name\": { \"type\": \"text\" }}}", XContentType.JSON) .settings(Settings.builder().put("number_of_shards", "2").put("number_of_replicas", "0")); - 
assertThat(execute(putTemplate2, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged(), - equalTo(true)); + assertThat(execute(putTemplate2, client.indices()::putTemplate, client.indices()::putTemplateAsync) + .isAcknowledged(), equalTo(true)); - GetIndexTemplatesResponse getTemplate1 = execute(new GetIndexTemplatesRequest("template-1"), - client.indices()::getTemplate, client.indices()::getTemplateAsync); + GetIndexTemplatesResponse getTemplate1 = execute( + new GetIndexTemplatesRequest("template-1"), + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync); assertThat(getTemplate1.getIndexTemplates(), hasSize(1)); IndexTemplateMetaData template1 = getTemplate1.getIndexTemplates().get(0); assertThat(template1.name(), equalTo("template-1")); assertThat(template1.patterns(), contains("pattern-1", "name-1")); assertTrue(template1.aliases().containsKey("alias-1")); - + GetIndexTemplatesResponse getTemplate2 = execute(new GetIndexTemplatesRequest("template-2"), - client.indices()::getTemplate, client.indices()::getTemplateAsync); + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync); assertThat(getTemplate2.getIndexTemplates(), hasSize(1)); IndexTemplateMetaData template2 = getTemplate2.getIndexTemplates().get(0); assertThat(template2.name(), equalTo("template-2")); assertThat(template2.patterns(), contains("pattern-2", "name-2")); assertTrue(template2.aliases().isEmpty()); assertThat(template2.settings().get("index.number_of_shards"), equalTo("2")); - assertThat(template2.settings().get("index.number_of_replicas"), equalTo("0")); + assertThat(template2.settings().get("index.number_of_replicas"), equalTo("0")); + // New API returns a MappingMetaData class rather than CompressedXContent for the mapping + assertTrue(template2.mappings().sourceAsMap().containsKey("properties")); + @SuppressWarnings("unchecked") + Map props = (Map) template2.mappings().sourceAsMap().get("properties"); + 
assertTrue(props.containsKey("name")); + + List names = randomBoolean() ? Arrays.asList("*-1", "template-2") : Arrays.asList("template-*"); GetIndexTemplatesRequest getBothRequest = new GetIndexTemplatesRequest(names); - GetIndexTemplatesResponse getBoth = execute(getBothRequest, client.indices()::getTemplate, client.indices()::getTemplateAsync); + GetIndexTemplatesResponse getBoth = execute( + getBothRequest, client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync); assertThat(getBoth.getIndexTemplates(), hasSize(2)); - assertThat(getBoth.getIndexTemplates().stream().map(IndexTemplateMetaData::getName).toArray(), + assertThat(getBoth.getIndexTemplates().stream().map(IndexTemplateMetaData::name).toArray(), arrayContainingInAnyOrder("template-1", "template-2")); GetIndexTemplatesRequest getAllRequest = new GetIndexTemplatesRequest(); - GetIndexTemplatesResponse getAll = execute(getAllRequest, client.indices()::getTemplate, client.indices()::getTemplateAsync); + GetIndexTemplatesResponse getAll = execute( + getAllRequest, client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync); assertThat(getAll.getIndexTemplates().size(), greaterThanOrEqualTo(2)); - assertThat(getAll.getIndexTemplates().stream().map(IndexTemplateMetaData::getName).collect(Collectors.toList()), + assertThat(getAll.getIndexTemplates().stream().map(IndexTemplateMetaData::name) + .collect(Collectors.toList()), hasItems("template-1", "template-2")); assertTrue(execute(new DeleteIndexTemplateRequest("template-1"), client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync).isAcknowledged()); assertThat(expectThrows(ElasticsearchException.class, () -> execute(new GetIndexTemplatesRequest("template-1"), - client.indices()::getTemplate, client.indices()::getTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); 
assertThat(expectThrows(ElasticsearchException.class, () -> execute(new DeleteIndexTemplateRequest("template-1"), client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); assertThat(execute(new GetIndexTemplatesRequest("template-*"), - client.indices()::getTemplate, client.indices()::getTemplateAsync).getIndexTemplates(), hasSize(1)); + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync).getIndexTemplates(), hasSize(1)); assertThat(execute(new GetIndexTemplatesRequest("template-*"), - client.indices()::getTemplate, client.indices()::getTemplateAsync).getIndexTemplates().get(0).name(), equalTo("template-2")); + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync).getIndexTemplates() + .get(0).name(), equalTo("template-2")); assertTrue(execute(new DeleteIndexTemplateRequest("template-*"), client.indices()::deleteTemplate, client.indices()::deleteTemplateAsync).isAcknowledged()); assertThat(expectThrows(ElasticsearchException.class, () -> execute(new GetIndexTemplatesRequest("template-*"), - client.indices()::getTemplate, client.indices()::getTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); + client.indices()::getIndexTemplate, client.indices()::getIndexTemplateAsync)).status(), equalTo(RestStatus.NOT_FOUND)); } public void testIndexTemplatesExist() throws Exception { @@ -1559,8 +1753,7 @@ public void testIndexTemplatesExist() throws Exception { { for (String suffix : Arrays.asList("1", "2")) { - final PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest() - .name("template-" + suffix) + final PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("template-" + suffix) .patterns(Arrays.asList("pattern-" + suffix, "name-" + suffix)) .alias(new Alias("alias-" + suffix)); assertTrue(execute(putRequest, client.indices()::putTemplate, client.indices()::putTemplateAsync).isAcknowledged()); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 409690eaa6997..6d873ec2b944c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.client.indices.CreateIndexRequest; @@ -53,6 +52,7 @@ import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; +import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.RandomCreateIndexGenerator; import org.elasticsearch.common.CheckedFunction; @@ -60,6 +60,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.junit.Assert; @@ -921,14 +922,16 @@ public void testIndexPutSettings() throws IOException { Assert.assertEquals(expectedParams, request.getParameters()); } - public void testPutTemplateRequest() throws Exception { + public void testPutTemplateRequestWithTypes() throws Exception { 
Map names = new HashMap<>(); names.put("log", "log"); names.put("template#1", "template%231"); names.put("-#template", "-%23template"); names.put("foo^bar", "foo%5Ebar"); - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(ESTestCase.randomFrom(names.keySet())) + org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest putTemplateRequest = + new org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest() + .name(ESTestCase.randomFrom(names.keySet())) .patterns(Arrays.asList(ESTestCase.generateRandomStringArray(20, 100, false, false))); if (ESTestCase.randomBoolean()) { putTemplateRequest.order(ESTestCase.randomInt()); @@ -939,14 +942,15 @@ public void testPutTemplateRequest() throws Exception { if (ESTestCase.randomBoolean()) { putTemplateRequest.settings(Settings.builder().put("setting-" + ESTestCase.randomInt(), ESTestCase.randomTimeValue())); } + Map expectedParams = new HashMap<>(); if (ESTestCase.randomBoolean()) { putTemplateRequest.mapping("doc-" + ESTestCase.randomInt(), "field-" + ESTestCase.randomInt(), "type=" + ESTestCase.randomFrom("text", "keyword")); } + expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); if (ESTestCase.randomBoolean()) { putTemplateRequest.alias(new Alias("alias-" + ESTestCase.randomInt())); } - Map expectedParams = new HashMap<>(); if (ESTestCase.randomBoolean()) { expectedParams.put("create", Boolean.TRUE.toString()); putTemplateRequest.create(true); @@ -955,9 +959,8 @@ public void testPutTemplateRequest() throws Exception { String cause = ESTestCase.randomUnicodeOfCodepointLengthBetween(1, 50); putTemplateRequest.cause(cause); expectedParams.put("cause", cause); - } + } RequestConvertersTests.setRandomMasterTimeout(putTemplateRequest, expectedParams); - expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); Request request = IndicesRequestConverters.putTemplate(putTemplateRequest); Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + 
names.get(putTemplateRequest.name()))); @@ -965,6 +968,49 @@ public void testPutTemplateRequest() throws Exception { RequestConvertersTests.assertToXContentBody(putTemplateRequest, request.getEntity()); } + public void testPutTemplateRequest() throws Exception { + Map names = new HashMap<>(); + names.put("log", "log"); + names.put("template#1", "template%231"); + names.put("-#template", "-%23template"); + names.put("foo^bar", "foo%5Ebar"); + + PutIndexTemplateRequest putTemplateRequest = + new PutIndexTemplateRequest(ESTestCase.randomFrom(names.keySet())) + .patterns(Arrays.asList(ESTestCase.generateRandomStringArray(20, 100, false, false))); + if (ESTestCase.randomBoolean()) { + putTemplateRequest.order(ESTestCase.randomInt()); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.version(ESTestCase.randomInt()); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.settings(Settings.builder().put("setting-" + ESTestCase.randomInt(), ESTestCase.randomTimeValue())); + } + Map expectedParams = new HashMap<>(); + if (ESTestCase.randomBoolean()) { + putTemplateRequest.mapping("{ \"properties\": { \"field-" + ESTestCase.randomInt() + + "\" : { \"type\" : \"" + ESTestCase.randomFrom("text", "keyword") + "\" }}}", XContentType.JSON); + } + if (ESTestCase.randomBoolean()) { + putTemplateRequest.alias(new Alias("alias-" + ESTestCase.randomInt())); + } + if (ESTestCase.randomBoolean()) { + expectedParams.put("create", Boolean.TRUE.toString()); + putTemplateRequest.create(true); + } + if (ESTestCase.randomBoolean()) { + String cause = ESTestCase.randomUnicodeOfCodepointLengthBetween(1, 50); + putTemplateRequest.cause(cause); + expectedParams.put("cause", cause); + } + RequestConvertersTests.setRandomMasterTimeout(putTemplateRequest, expectedParams); + + Request request = IndicesRequestConverters.putTemplate(putTemplateRequest); + Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + names.get(putTemplateRequest.name()))); + 
Assert.assertThat(request.getParameters(), equalTo(expectedParams)); + RequestConvertersTests.assertToXContentBody(putTemplateRequest, request.getEntity()); + } public void testValidateQuery() throws Exception { String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); String[] types = ESTestCase.randomBoolean() ? ESTestCase.generateRandomStringArray(5, 5, false, false) : null; @@ -1012,9 +1058,9 @@ public void testGetTemplateRequest() throws Exception { Map expectedParams = new HashMap<>(); RequestConvertersTests.setRandomMasterTimeout(getTemplatesRequest::setMasterNodeTimeout, expectedParams); RequestConvertersTests.setRandomLocal(getTemplatesRequest::setLocal, expectedParams); - expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); - Request request = IndicesRequestConverters.getTemplates(getTemplatesRequest); + Request request = IndicesRequestConverters.getTemplatesWithDocumentTypes(getTemplatesRequest); + expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); Assert.assertThat(request.getEndpoint(), equalTo("/_template/" + names.stream().map(encodes::get).collect(Collectors.joining(",")))); Assert.assertThat(request.getParameters(), equalTo(expectedParams)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index b472837f9c25d..11faaf879729d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -61,6 +61,7 @@ import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; +import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import 
org.elasticsearch.client.ml.StartDatafeedRequestTests; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -818,6 +819,22 @@ public void testFindFileStructure() throws Exception { assertEquals(sample, requestEntityToString(request)); } + public void testSetUpgradeMode() { + SetUpgradeModeRequest setUpgradeModeRequest = new SetUpgradeModeRequest(true); + + Request request = MLRequestConverters.setUpgradeMode(setUpgradeModeRequest); + assertThat(request.getEndpoint(), equalTo("/_ml/set_upgrade_mode")); + assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(request.getParameters().get(SetUpgradeModeRequest.ENABLED.getPreferredName()), equalTo(Boolean.toString(true))); + assertThat(request.getParameters().containsKey(SetUpgradeModeRequest.TIMEOUT.getPreferredName()), is(false)); + + setUpgradeModeRequest.setTimeout(TimeValue.timeValueHours(1)); + setUpgradeModeRequest.setEnabled(false); + request = MLRequestConverters.setUpgradeMode(setUpgradeModeRequest); + assertThat(request.getParameters().get(SetUpgradeModeRequest.ENABLED.getPreferredName()), equalTo(Boolean.toString(false))); + assertThat(request.getParameters().get(SetUpgradeModeRequest.TIMEOUT.getPreferredName()), is("1h")); + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 162fceed9e572..07d7187fd1d70 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -84,6 +84,7 @@ import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import 
org.elasticsearch.client.ml.RevertModelSnapshotResponse; +import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -1614,4 +1615,30 @@ public void testFindFileStructure() throws IOException { assertEquals("timestamp", structure.getTimestampField()); assertFalse(structure.needClientTimezone()); } + + public void testEnableUpgradeMode() throws Exception { + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + MlInfoResponse mlInfoResponse = machineLearningClient.getMlInfo(new MlInfoRequest(), RequestOptions.DEFAULT); + assertThat(mlInfoResponse.getInfo().get("upgrade_mode"), equalTo(false)); + + AcknowledgedResponse setUpgrademodeResponse = execute(new SetUpgradeModeRequest(true), + machineLearningClient::setUpgradeMode, + machineLearningClient::setUpgradeModeAsync); + + assertThat(setUpgrademodeResponse.isAcknowledged(), is(true)); + + + mlInfoResponse = machineLearningClient.getMlInfo(new MlInfoRequest(), RequestOptions.DEFAULT); + assertThat(mlInfoResponse.getInfo().get("upgrade_mode"), equalTo(true)); + + setUpgrademodeResponse = execute(new SetUpgradeModeRequest(false), + machineLearningClient::setUpgradeMode, + machineLearningClient::setUpgradeModeAsync); + + assertThat(setUpgrademodeResponse.isAcknowledged(), is(true)); + + mlInfoResponse = machineLearningClient.getMlInfo(new MlInfoRequest(), RequestOptions.DEFAULT); + assertThat(mlInfoResponse.getInfo().get("upgrade_mode"), equalTo(false)); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index a20d78f939da5..945c1df19efa3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -744,6 +744,14 @@ public void testApiNamingConventions() throws Exception { .collect(Collectors.groupingBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet()))); + // TODO remove in 8.0 - we will undeprecate indices.get_template because the current getIndexTemplate + // impl will replace the existing getTemplate method. + // The above general-purpose code ignores all deprecated methods which in this case leaves `getTemplate` + // looking like it doesn't have a valid implementatation when it does. + apiUnsupported.remove("indices.get_template"); + + + for (Map.Entry> entry : methods.entrySet()) { String apiName = entry.getKey(); @@ -776,7 +784,10 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("security.") == false && apiName.startsWith("index_lifecycle.") == false && apiName.startsWith("ccr.") == false && - apiName.endsWith("freeze") == false) { + apiName.endsWith("freeze") == false && + // IndicesClientIT.getIndexTemplate should be renamed "getTemplate" in version 8.0 when we + // can get rid of 7.0's deprecated "getTemplate" + apiName.equals("indices.get_index_template") == false) { apiNotFound.add(apiName); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index 9ef01a1a6f7bf..2e54d1c4a1a7c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; +import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -97,7 +98,8 @@ public void testPutFollow() throws Exception { PutFollowRequest putFollowRequest = new PutFollowRequest( "local", // <1> "leader", // <2> - "follower" // <3> + "follower", // <3> + ActiveShardCount.ONE // <4> ); // end::ccr-put-follow-request @@ -175,7 +177,7 @@ public void testPauseFollow() throws Exception { String followIndex = "follower"; // Follow index, so that it can be paused: { - PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE); PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); @@ -241,7 +243,7 @@ public void testResumeFollow() throws Exception { String followIndex = "follower"; // Follow index, so that it can be paused: { - PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE); PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); @@ -317,7 +319,7 @@ public void testUnfollow() throws Exception { String followIndex = "follower"; // Follow index, pause and close, so that it can be unfollowed: { - PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, 
ActiveShardCount.ONE); PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); @@ -349,7 +351,7 @@ public void testUnfollow() throws Exception { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(followIndex); assertThat(client.indices().delete(deleteIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true)); - PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE); PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); @@ -639,7 +641,7 @@ public void testGetFollowStats() throws Exception { } { // Follow index, so that we can query for follow stats: - PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower"); + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower", ActiveShardCount.ONE); PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index cfd5ac4de2f82..d358655f2355a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -55,8 +55,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; -import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.admin.indices.validate.query.QueryExplanation; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; @@ -76,11 +74,13 @@ import org.elasticsearch.client.indices.GetIndexTemplatesRequest; import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.GetMappingsResponse; +import org.elasticsearch.client.indices.GetIndexTemplatesResponse; +import org.elasticsearch.client.indices.IndexTemplateMetaData; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; +import org.elasticsearch.client.indices.PutIndexTemplateRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -2094,16 +2094,14 @@ public void testPutTemplate() throws Exception { { // tag::put-template-request-mappings-json - request.mapping("_doc", // <1> + request.mapping(// <1> "{\n" + - " \"_doc\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + " \"properties\": {\n" + + " \"message\": 
{\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + - "}", // <2> + "}", XContentType.JSON); // end::put-template-request-mappings-json assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); @@ -2111,14 +2109,16 @@ public void testPutTemplate() throws Exception { { //tag::put-template-request-mappings-map Map jsonMap = new HashMap<>(); - Map message = new HashMap<>(); - message.put("type", "text"); - Map properties = new HashMap<>(); - properties.put("message", message); - Map mapping = new HashMap<>(); - mapping.put("properties", properties); - jsonMap.put("_doc", mapping); - request.mapping("_doc", jsonMap); // <1> + { + Map properties = new HashMap<>(); + { + Map message = new HashMap<>(); + message.put("type", "text"); + properties.put("message", message); + } + jsonMap.put("properties", properties); + } + request.mapping(jsonMap); // <1> //end::put-template-request-mappings-map assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } @@ -2127,31 +2127,21 @@ public void testPutTemplate() throws Exception { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); { - builder.startObject("_doc"); + builder.startObject("properties"); { - builder.startObject("properties"); + builder.startObject("message"); { - builder.startObject("message"); - { - builder.field("type", "text"); - } - builder.endObject(); + builder.field("type", "text"); } builder.endObject(); } builder.endObject(); } builder.endObject(); - request.mapping("_doc", builder); // <1> + request.mapping(builder); // <1> //end::put-template-request-mappings-xcontent assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } - { - //tag::put-template-request-mappings-shortcut - request.mapping("_doc", "message", "type=text"); // <1> - //end::put-template-request-mappings-shortcut - assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); - } 
// tag::put-template-request-aliases request.alias(new Alias("twitter_alias").filter(QueryBuilders.termQuery("user", "kimchy"))); // <1> @@ -2177,11 +2167,9 @@ public void testPutTemplate() throws Exception { " \"number_of_shards\": 1\n" + " },\n" + " \"mappings\": {\n" + - " \"_doc\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + " },\n" + @@ -2244,13 +2232,11 @@ public void testGetTemplates() throws Exception { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest("my-template"); putRequest.patterns(Arrays.asList("pattern-1", "log-*")); putRequest.settings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 1)); - putRequest.mapping("_doc", + putRequest.mapping( "{\n" + - " \"_doc\": {\n" + - " \"properties\": {\n" + - " \"message\": {\n" + - " \"type\": \"text\"\n" + - " }\n" + + " \"properties\": {\n" + + " \"message\": {\n" + + " \"type\": \"text\"\n" + " }\n" + " }\n" + "}", XContentType.JSON); @@ -2269,7 +2255,7 @@ public void testGetTemplates() throws Exception { // end::get-templates-request-masterTimeout // tag::get-templates-execute - GetIndexTemplatesResponse getTemplatesResponse = client.indices().getTemplate(request, RequestOptions.DEFAULT); + GetIndexTemplatesResponse getTemplatesResponse = client.indices().getIndexTemplate(request, RequestOptions.DEFAULT); // end::get-templates-execute // tag::get-templates-response @@ -2299,7 +2285,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::get-templates-execute-async - client.indices().getTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> + client.indices().getIndexTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-templates-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 68881206b487f..f4e3f86196f29 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -99,6 +99,7 @@ import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.RevertModelSnapshotResponse; +import org.elasticsearch.client.ml.SetUpgradeModeRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -3078,6 +3079,57 @@ public void onFailure(Exception e) { } } + public void testSetUpgradeMode() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + // tag::set-upgrade-mode-request + SetUpgradeModeRequest request = new SetUpgradeModeRequest(true); // <1> + request.setTimeout(TimeValue.timeValueMinutes(10)); // <2> + // end::set-upgrade-mode-request + + // Set to false so that the cluster setting does not have to be unset at the end of the test. 
+ request.setEnabled(false); + + // tag::set-upgrade-mode-execute + AcknowledgedResponse acknowledgedResponse = client.machineLearning().setUpgradeMode(request, RequestOptions.DEFAULT); + // end::set-upgrade-mode-execute + + // tag::set-upgrade-mode-response + boolean acknowledged = acknowledgedResponse.isAcknowledged(); // <1> + // end::set-upgrade-mode-response + assertThat(acknowledged, is(true)); + } + { + SetUpgradeModeRequest request = new SetUpgradeModeRequest(false); + + // tag::set-upgrade-mode-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::set-upgrade-mode-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::set-upgrade-mode-execute-async + client.machineLearning() + .setUpgradeModeAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::set-upgrade-mode-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + } + } + private String createFilter(RestHighLevelClient client) throws IOException { MlFilter.Builder filterBuilder = MlFilter.builder("my_safe_domains") .setDescription("A list of safe domains") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetIndexTemplatesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetIndexTemplatesResponseTests.java new file mode 100644 index 0000000000000..d2f0c3d7eba88 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetIndexTemplatesResponseTests.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class GetIndexTemplatesResponseTests extends ESTestCase { + + static final String mappingString = "{\"properties\":{" + + "\"f1\": {\"type\":\"text\"}," + + "\"f2\": {\"type\":\"keyword\"}" + + "}}"; + + + public void testFromXContent() throws IOException { + xContentTester(this::createParser, GetIndexTemplatesResponseTests::createTestInstance, 
GetIndexTemplatesResponseTests::toXContent, + GetIndexTemplatesResponse::fromXContent).supportsUnknownFields(false) + .assertEqualsConsumer(GetIndexTemplatesResponseTests::assertEqualInstances) + .shuffleFieldsExceptions(new String[] {"aliases", "mappings", "patterns", "settings"}) + .test(); + } + + private static void assertEqualInstances(GetIndexTemplatesResponse expectedInstance, GetIndexTemplatesResponse newInstance) { + assertEquals(expectedInstance, newInstance); + // Check there's no doc types at the root of the mapping + Map expectedMap = XContentHelper.convertToMap( + new BytesArray(mappingString), true, XContentType.JSON).v2(); + for (IndexTemplateMetaData template : newInstance.getIndexTemplates()) { + MappingMetaData mappingMD = template.mappings(); + if(mappingMD!=null) { + Map mappingAsMap = mappingMD.sourceAsMap(); + assertEquals(expectedMap, mappingAsMap); + } + } + } + + static GetIndexTemplatesResponse createTestInstance() { + List templates = new ArrayList<>(); + int numTemplates = between(0, 10); + for (int t = 0; t < numTemplates; t++) { + IndexTemplateMetaData.Builder templateBuilder = IndexTemplateMetaData.builder("template-" + t); + templateBuilder.patterns(IntStream.range(0, between(1, 5)).mapToObj(i -> "pattern-" + i).collect(Collectors.toList())); + int numAlias = between(0, 5); + for (int i = 0; i < numAlias; i++) { + templateBuilder.putAlias(AliasMetaData.builder(randomAlphaOfLengthBetween(1, 10))); + } + if (randomBoolean()) { + templateBuilder.settings(Settings.builder().put("index.setting-1", randomLong())); + } + if (randomBoolean()) { + templateBuilder.order(randomInt()); + } + if (randomBoolean()) { + templateBuilder.version(between(0, 100)); + } + if (randomBoolean()) { + try { + Map map = XContentHelper.convertToMap(new BytesArray(mappingString), true, XContentType.JSON).v2(); + MappingMetaData mapping = new MappingMetaData(MapperService.SINGLE_MAPPING_NAME, map); + templateBuilder.mapping(mapping); + } catch (IOException ex) { 
+ throw new UncheckedIOException(ex); + } + } + templates.add(templateBuilder.build()); + } + return new GetIndexTemplatesResponse(templates); + } + + // As the client class GetIndexTemplatesResponse doesn't have toXContent method, adding this method here only for the test + static void toXContent(GetIndexTemplatesResponse response, XContentBuilder builder) throws IOException { + + //Create a server-side counterpart for the client-side class and call toXContent on it + + List serverIndexTemplates = new ArrayList<>(); + List clientIndexTemplates = response.getIndexTemplates(); + for (IndexTemplateMetaData clientITMD : clientIndexTemplates) { + org.elasticsearch.cluster.metadata.IndexTemplateMetaData.Builder serverTemplateBuilder = + org.elasticsearch.cluster.metadata.IndexTemplateMetaData.builder(clientITMD.name()); + + serverTemplateBuilder.patterns(clientITMD.patterns()); + + Iterator aliases = clientITMD.aliases().valuesIt(); + aliases.forEachRemaining((a)->serverTemplateBuilder.putAlias(a)); + + serverTemplateBuilder.settings(clientITMD.settings()); + serverTemplateBuilder.order(clientITMD.order()); + serverTemplateBuilder.version(clientITMD.version()); + if (clientITMD.mappings() != null) { + serverTemplateBuilder.putMapping(MapperService.SINGLE_MAPPING_NAME, clientITMD.mappings().source()); + } + serverIndexTemplates.add(serverTemplateBuilder.build()); + + } + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse serverResponse = new + org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse(serverIndexTemplates); + serverResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutIndexTemplateRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutIndexTemplateRequestTests.java new file mode 100644 index 0000000000000..8aab973982fc0 --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutIndexTemplateRequestTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.core.Is.is; + +public class PutIndexTemplateRequestTests extends AbstractXContentTestCase { + public void testValidateErrorMessage() throws Exception { + expectThrows(IllegalArgumentException.class, () -> new PutIndexTemplateRequest(null)); + expectThrows(IllegalArgumentException.class, () -> new 
PutIndexTemplateRequest("test").name(null)); + PutIndexTemplateRequest request = new PutIndexTemplateRequest("test"); + ActionRequestValidationException withoutPattern = request.validate(); + assertThat(withoutPattern.getMessage(), containsString("index patterns are missing")); + + request.name("foo"); + ActionRequestValidationException withoutIndexPatterns = request.validate(); + assertThat(withoutIndexPatterns.validationErrors(), hasSize(1)); + assertThat(withoutIndexPatterns.getMessage(), containsString("index patterns are missing")); + + request.patterns(Collections.singletonList("test-*")); + ActionRequestValidationException noError = request.validate(); + assertThat(noError, is(nullValue())); + } + + @Override + protected PutIndexTemplateRequest createTestInstance() { + PutIndexTemplateRequest request = new PutIndexTemplateRequest("test"); + if (randomBoolean()) { + request.version(randomInt()); + } + if (randomBoolean()) { + request.order(randomInt()); + } + request.patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); + int numAlias = between(0, 5); + for (int i = 0; i < numAlias; i++) { + // some ASCII or Latin-1 control characters, especially newline, can lead to + // problems with yaml parsers, that's why we filter them here (see #30911) + Alias alias = new Alias(randomRealisticUnicodeOfLengthBetween(1, 10).replaceAll("\\p{Cc}", "")); + if (randomBoolean()) { + alias.indexRouting(randomRealisticUnicodeOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + alias.searchRouting(randomRealisticUnicodeOfLengthBetween(1, 10)); + } + request.alias(alias); + } + if (randomBoolean()) { + try { + request.mapping(XContentFactory.jsonBuilder().startObject() + .startObject("properties") + .startObject("field-" + randomInt()).field("type", randomFrom("keyword", "text")).endObject() + .endObject().endObject()); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } + } + if (randomBoolean()) { + 
request.settings(Settings.builder().put("setting1", randomLong()).put("setting2", randomTimeValue()).build()); + } + return request; + } + + @Override + protected PutIndexTemplateRequest doParseInstance(XContentParser parser) throws IOException { + return new PutIndexTemplateRequest("test").source(parser.map()); + } + + @Override + protected void assertEqualInstances(PutIndexTemplateRequest expected, PutIndexTemplateRequest actual) { + assertNotSame(expected, actual); + assertThat(actual.version(), equalTo(expected.version())); + assertThat(actual.order(), equalTo(expected.order())); + assertThat(actual.patterns(), equalTo(expected.patterns())); + assertThat(actual.aliases(), equalTo(expected.aliases())); + assertThat(actual.mappings(), equalTo(expected.mappings())); + assertThat(actual.settings(), equalTo(expected.settings())); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 7ab7e671ac214..3723e31b27f1e 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -105,13 +105,15 @@ task buildIntegTestZip(type: Zip) { with archiveFiles(transportModulesFiles, 'zip', true) } -task buildZip(type: Zip) { +task buildWindowsZip(type: Zip) { configure(commonZipConfig) + archiveClassifier = 'windows-x86_64' with archiveFiles(modulesFiles(false), 'zip', false) } -task buildOssZip(type: Zip) { +task buildOssWindowsZip(type: Zip) { configure(commonZipConfig) + archiveClassifier = 'windows-x86_64' with archiveFiles(modulesFiles(true), 'zip', true) } @@ -122,13 +124,27 @@ Closure commonTarConfig = { fileMode 0644 } -task buildTar(type: Tar) { +task buildDarwinTar(type: Tar) { configure(commonTarConfig) + archiveClassifier = 'darwin-x86_64' with archiveFiles(modulesFiles(false), 'tar', false) } -task buildOssTar(type: Tar) { +task buildOssDarwinTar(type: Tar) { configure(commonTarConfig) + archiveClassifier = 
'darwin-x86_64' + with archiveFiles(modulesFiles(true), 'tar', true) +} + +task buildLinuxTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'linux-x86_64' + with archiveFiles(modulesFiles(false), 'tar', false) +} + +task buildOssLinuxTar(type: Tar) { + configure(commonTarConfig) + archiveClassifier = 'linux-x86_64' with archiveFiles(modulesFiles(true), 'tar', true) } diff --git a/distribution/archives/oss-tar/build.gradle b/distribution/archives/darwin-tar/build.gradle similarity index 100% rename from distribution/archives/oss-tar/build.gradle rename to distribution/archives/darwin-tar/build.gradle diff --git a/distribution/archives/oss-zip/build.gradle b/distribution/archives/linux-tar/build.gradle similarity index 100% rename from distribution/archives/oss-zip/build.gradle rename to distribution/archives/linux-tar/build.gradle diff --git a/distribution/archives/tar/build.gradle b/distribution/archives/oss-darwin-tar/build.gradle similarity index 100% rename from distribution/archives/tar/build.gradle rename to distribution/archives/oss-darwin-tar/build.gradle diff --git a/distribution/archives/zip/build.gradle b/distribution/archives/oss-linux-tar/build.gradle similarity index 100% rename from distribution/archives/zip/build.gradle rename to distribution/archives/oss-linux-tar/build.gradle diff --git a/distribution/archives/oss-windows-zip/build.gradle b/distribution/archives/oss-windows-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/oss-windows-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. All configuration of the +// distribution is done in the parent project. diff --git a/distribution/archives/windows-zip/build.gradle b/distribution/archives/windows-zip/build.gradle new file mode 100644 index 0000000000000..4a6dde5fc0c92 --- /dev/null +++ b/distribution/archives/windows-zip/build.gradle @@ -0,0 +1,2 @@ +// This file is intentionally blank. 
All configuration of the +// distribution is done in the parent project. diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 4557e0eb1dc4e..0ee597b449a41 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -122,17 +122,34 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre Map artifactFiles = [:] List projectDirs = [] - List projects = ['zip', 'deb', 'rpm'] + List projects = ['deb', 'rpm'] + if (bwcVersion.onOrAfter('7.0.0')) { + projects.addAll(['windows-zip', 'darwin-tar', 'linux-tar']) + } else { + projects.add('zip') + } + for (String projectName : projects) { String baseDir = "distribution" + String classifier = "" + String extension = projectName + if (bwcVersion.onOrAfter('7.0.0') && (projectName.contains('zip') || projectName.contains('tar'))) { + int index = projectName.indexOf('-') + classifier = "-${projectName.substring(0, index)}-x86_64" + extension = projectName.substring(index + 1) + if (extension.equals('tar')) { + extension += '.gz' + } + } if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += projectName == 'zip' ? '/archives' : '/packages' + baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? 
'/archives' : '/packages' // add oss variant first projectDirs.add("${baseDir}/oss-${projectName}") - artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT.${projectName}")) + artifactFiles.put("oss-" + projectName, file("${checkoutDir}/${baseDir}/oss-${projectName}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) } projectDirs.add("${baseDir}/${projectName}") - artifactFiles.put(projectName, file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${projectName}")) + artifactFiles.put(projectName, + file("${checkoutDir}/${baseDir}/${projectName}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT${classifier}.${extension}")) } Closure createRunBwcGradleTask = { name, extraConfig -> @@ -216,9 +233,15 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre String artifactFileName = artifactFile.name String artifactName = artifactFileName.contains('oss') ? 
'elasticsearch-oss' : 'elasticsearch' String suffix = artifactFile.toString()[-3..-1] + int archIndex = artifactFileName.indexOf('x86_64') + String classifier = '' + if (archIndex != -1) { + int osIndex = artifactFileName.lastIndexOf('-', archIndex - 2) + classifier = "${artifactFileName.substring(osIndex + 1, archIndex - 1)}-x86_64" + } configurations.create(projectName) artifacts { - it.add(projectName, [file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion]) + it.add(projectName, [file: artifactFile, name: artifactName, classifier: classifier, type: suffix, builtBy: buildBwcVersion]) } } // make sure no dependencies were added to assemble; we want it to be a no-op diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 4d228e3c44fdc..e4d27da1f1fb0 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -13,13 +13,14 @@ configurations { } dependencies { - dockerSource project(path: ":distribution:archives:tar") - ossDockerSource project(path: ":distribution:archives:oss-tar") + dockerSource project(path: ":distribution:archives:linux-tar") + ossDockerSource project(path: ":distribution:archives:oss-linux-tar") } ext.expansions = { oss -> + String classifier = 'linux-x86_64' return [ - 'elasticsearch' : oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}.tar.gz", + 'elasticsearch' : oss ? "elasticsearch-oss-${VersionProperties.elasticsearch}-${classifier}.tar.gz" : "elasticsearch-${VersionProperties.elasticsearch}-${classifier}.tar.gz", 'jdkUrl' : 'https://download.java.net/java/GA/jdk11/9/GPL/openjdk-11.0.2_linux-x64_bin.tar.gz', 'jdkVersion' : '11.0.2', 'license': oss ? 
'Apache-2.0' : 'Elastic License', diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index 0b573ed9bad13..ad0f4a5cdec08 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -101,8 +101,9 @@ Closure commonPackageConfig(String type, boolean oss) { return { dependsOn "process${oss ? 'Oss' : ''}${type.capitalize()}Files" packageName "elasticsearch${oss ? '-oss' : ''}" + arch (type == 'deb' ? 'amd64' : 'X86_64') // Follow elasticsearch's file naming convention - archiveName "${packageName}-${project.version}.${type}" + archiveName "${packageName}-${project.version}-${archString}.${type}" String prefix = "${oss ? 'oss-' : ''}${type}" destinationDir = file("${prefix}/build/distributions") @@ -333,7 +334,6 @@ Closure commonRpmConfig(boolean oss) { packager 'Elasticsearch' version = project.version.replace('-', '_') release = '1' - arch 'NOARCH' os 'LINUX' distribution 'Elasticsearch' vendor 'Elasticsearch' diff --git a/docs/java-rest/high-level/ccr/put_follow.asciidoc b/docs/java-rest/high-level/ccr/put_follow.asciidoc index 839a9bedc6552..2f40bbd5d2b2d 100644 --- a/docs/java-rest/high-level/ccr/put_follow.asciidoc +++ b/docs/java-rest/high-level/ccr/put_follow.asciidoc @@ -20,6 +20,8 @@ include-tagged::{doc-tests-file}[{api}-request] <1> The name of the remote cluster alias. <2> The name of the leader in the remote cluster. <3> The name of the follower index that gets created as part of the put follow API call. +<4> The number of active shard copies to wait for before the put follow API returns a +response, as an `ActiveShardCount` [id="{upid}-{api}-response"] ==== Response diff --git a/docs/java-rest/high-level/indices/put_template.asciidoc b/docs/java-rest/high-level/indices/put_template.asciidoc index 7618fc8ce7af5..3e3954308736d 100644 --- a/docs/java-rest/high-level/indices/put_template.asciidoc +++ b/docs/java-rest/high-level/indices/put_template.asciidoc @@ -39,8 +39,7 @@ template's patterns. 
-------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request-mappings-json] -------------------------------------------------- -<1> The type to define -<2> The mapping for this type, provided as a JSON string +<1> The mapping, provided as a JSON string The mapping source can be provided in different ways in addition to the `String` example shown above: @@ -59,13 +58,6 @@ include-tagged::{doc-tests-file}[{api}-request-mappings-xcontent] <1> Mapping source provided as an `XContentBuilder` object, the Elasticsearch built-in helpers to generate JSON content -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-mappings-shortcut] --------------------------------------------------- -<1> Mapping source provided as `Object` key-pairs, which gets converted to -JSON format - ==== Aliases The aliases of the template will define aliasing to the index whose name matches the template's patterns. A placeholder `{index}` can be used in an alias of a template. diff --git a/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc new file mode 100644 index 0000000000000..80bb1874e4a63 --- /dev/null +++ b/docs/java-rest/high-level/ml/set-upgrade-mode.asciidoc @@ -0,0 +1,40 @@ +-- +:api: set-upgrade-mode +:request: SetUpgradeModeRequest +:response: AcknowledgedResponse +-- +[id="{upid}-{api}"] +=== Set Upgrade Mode API + +The Set Upgrade Mode API temporarily halts all {ml} job and {dfeed} tasks when `enabled=true`. Their +reported states remain unchanged. Consequently, when exiting upgrade mode the halted {ml} jobs and +{dfeeds} will return to their previous state. + +It accepts a +{request}+ object and responds with a +{response}+ object. + +When `enabled=true`, no new jobs can be opened, and no job or {dfeed} tasks will +be running. 
Be sure to set `enabled=false` once upgrade actions are completed. + +[id="{upid}-{api}-request"] +==== Set Upgrade Mode Request + +A +{request}+ object gets created setting the desired `enabled` state. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new request referencing enabling upgrade mode +<2> Optionally setting the `timeout` value for how long the +execution should wait. + +[id="{upid}-{api}-response"] +==== Set Upgrade Mode Response + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> `isAcknowledged()` from the +{response}+ indicates if the setting was set successfully. + +include::../execution.asciidoc[] \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 0b4a2570c896d..70f06e457e9b5 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -295,6 +295,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-update-model-snapshot>> * <<{upid}-get-ml-info>> * <<{upid}-delete-expired-data>> +* <<{upid}-set-upgrade-mode>> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -338,6 +339,7 @@ include::ml/revert-model-snapshot.asciidoc[] include::ml/update-model-snapshot.asciidoc[] include::ml/get-info.asciidoc[] include::ml/delete-expired-data.asciidoc[] +include::ml/set-upgrade-mode.asciidoc[] == Migration APIs diff --git a/docs/painless/painless-debugging.asciidoc b/docs/painless/painless-debugging.asciidoc index c141cbc53252a..e335bfe74ebd7 100644 --- a/docs/painless/painless-debugging.asciidoc +++ 
b/docs/painless/painless-debugging.asciidoc @@ -18,10 +18,10 @@ context available to a {ref}/query-dsl-script-query.html[script query]. [source,js] --------------------------------------------------------- -PUT /hockey/player/1?refresh +PUT /hockey/_doc/1?refresh {"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1]} -POST /hockey/player/1/_explain +POST /hockey/_explain/1 { "query": { "script": { @@ -31,7 +31,7 @@ POST /hockey/player/1/_explain } --------------------------------------------------------- // CONSOLE -// TEST[s/_explain/_explain?error_trace=false/ catch:/painless_explain_error/] +// TEST[s/_explain\/1/_explain\/1?error_trace=false/ catch:/painless_explain_error/] // The test system sends error_trace=true by default for easier debugging so // we have to override it to get a normal shaped response @@ -58,13 +58,13 @@ in the `_update` API: [source,js] --------------------------------------------------------- -POST /hockey/player/1/_update +POST /hockey/_update/1 { "script": "Debug.explain(ctx._source)" } --------------------------------------------------------- // CONSOLE -// TEST[continued s/_update/_update?error_trace=false/ catch:/painless_explain_error/] +// TEST[continued s/_update\/1/_update\/1?error_trace=false/ catch:/painless_explain_error/] The response looks like: diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 936bd8e198c72..f562033471e31 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -10,7 +10,7 @@ To illustrate how Painless works, let's load some hockey stats into an Elasticse [source,js] ---------------------------------------------------------------- -PUT hockey/player/_bulk?refresh +PUT hockey/_bulk?refresh {"index":{"_id":1}} {"first":"johnny","last":"gaudreau","goals":[9,27,1],"assists":[17,46,0],"gp":[26,82,1],"born":"1993/08/13"} {"index":{"_id":2}} @@ -158,7 
+158,7 @@ To change player 1's last name to `hockey`, simply set `ctx._source.last` to the [source,js] ---------------------------------------------------------------- -POST hockey/player/1/_update +POST hockey/_update/1 { "script": { "lang": "painless", @@ -176,7 +176,7 @@ the player's nickname, _hockey_. [source,js] ---------------------------------------------------------------- -POST hockey/player/1/_update +POST hockey/_update/1 { "script": { "lang": "painless", diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index b0b87dda792fe..e7ab83ca6e69b 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -221,6 +221,32 @@ The following settings are supported: currently supported by the plugin. For more information about the different classes, see http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide] +NOTE: The option of defining client settings in the repository settings as documented below is considered deprecated: + +In addition to the above settings, you may also specify all non-secure client settings in the repository settings. +In this case, the client settings found in the repository settings will be merged with those of the named client used by the repository. +Conflicts between client and repository settings are resolved by the repository settings taking precedence over client settings. + +For example: + +[source,js] +---- +PUT _snapshot/my_s3_repository +{ + "type": "s3", + "settings": { + "client": "my_client_name", + "bucket": "my_bucket_name", + "endpoint": "my.s3.endpoint" + } +} +---- +// CONSOLE +// TEST[skip:we don't have s3 set up while testing this] + +This sets up a repository that uses all client settings from the client `my_client_name` except for the `endpoint` that is overridden +to `my.s3.endpoint` by the repository settings. 
+ [[repository-s3-permissions]] ===== Recommended S3 Permissions diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc index a451c6da0db95..405a9d89107f2 100644 --- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc @@ -49,7 +49,7 @@ POST /sales/_search?size=0 "aggs" : { "type_count" : { "cardinality" : { - "field" : "_doc", + "field" : "type", "precision_threshold": 100 <1> } } @@ -214,7 +214,7 @@ POST /sales/_search?size=0 "script" : { "id": "my_script", "params": { - "type_field": "_doc", + "type_field": "type", "promoted_field": "promoted" } } diff --git a/docs/reference/ccr/apis/follow-request-body.asciidoc b/docs/reference/ccr/apis/follow-request-body.asciidoc index 7215cc01302a1..e7e6ae2e26a05 100644 --- a/docs/reference/ccr/apis/follow-request-body.asciidoc +++ b/docs/reference/ccr/apis/follow-request-body.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="platinum"] `max_read_request_operation_count`:: (integer) the maximum number of operations to pull per read from the remote cluster @@ -41,4 +43,62 @@ remote cluster when the follower index is synchronized with the leader index; when the timeout has elapsed, the poll for operations will return to the follower so that it can update some statistics, and then the follower will - immediately attempt to read from the leader again \ No newline at end of file + immediately attempt to read from the leader again + +===== Default values + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 +{ + "remote_cluster" : "remote_cluster", + "leader_index" : "leader_index" +} +-------------------------------------------------- +// CONSOLE +// TESTSETUP +// TEST[setup:remote_cluster_and_leader_index] + +[source,js] 
+-------------------------------------------------- +POST /follower_index/_ccr/pause_follow +-------------------------------------------------- +// CONSOLE +// TEARDOWN + +[source,js] +-------------------------------------------------- +GET /follower_index/_ccr/info?filter_path=follower_indices.parameters +-------------------------------------------------- +// CONSOLE + +////////////////////////// + +The following output from the follow info api describes all the default +values for the above described index follow request parameters: + +[source,js] +-------------------------------------------------- +{ + "follower_indices" : [ + { + "parameters" : { + "max_read_request_operation_count" : 5120, + "max_read_request_size" : "32mb", + "max_outstanding_read_requests" : 12, + "max_write_request_operation_count" : 5120, + "max_write_request_size" : "9223372036854775807b", + "max_outstanding_write_requests" : 9, + "max_write_buffer_count" : 2147483647, + "max_write_buffer_size" : "512mb", + "max_retry_delay" : "500ms", + "read_poll_timeout" : "1m" + } + } + ] +} + +-------------------------------------------------- +// TESTRESPONSE \ No newline at end of file diff --git a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc index 22418db10887c..212b1167b6e33 100644 --- a/docs/reference/ccr/apis/follow/get-follow-info.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-info.asciidoc @@ -22,7 +22,7 @@ replication options and whether the follower indices are active or paused. 
[source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc index 766f502ff93a3..8c02582e01278 100644 --- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc +++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc @@ -21,7 +21,7 @@ following tasks associated with each shard for the specified indices. [source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index 0d56ee76bd9b9..f5b0bef7b2994 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -24,7 +24,7 @@ following task. 
[source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index e8b4cd50f27e7..736061f2bfde8 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -23,7 +23,7 @@ returns, the follower index will resume fetching operations from the leader inde [source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc index 6507c04ac5026..c3126d02d1efc 100644 --- a/docs/reference/ccr/apis/follow/post-unfollow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-unfollow.asciidoc @@ -27,7 +27,7 @@ irreversible operation. 
[source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 3f6156c1e6820..52253d6ad2f4c 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -31,7 +31,7 @@ POST /follower_index/_ccr/pause_follow [source,js] -------------------------------------------------- -PUT //_ccr/follow +PUT //_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "", "leader_index" : "" @@ -43,6 +43,11 @@ PUT //_ccr/follow // TEST[s//remote_cluster/] // TEST[s//leader_index/] +The `wait_for_active_shards` parameter specifies the number of shards to wait on being active +before responding. This defaults to waiting on none of the shards to be active. A shard must +be restored from the leader index being active. Restoring a follower shard requires transferring +all the remote Lucene segment files to the follower index. + ==== Path Parameters `follower_index` (required):: @@ -73,7 +78,7 @@ This example creates a follower index named `follower_index`: [source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index", diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index f47e49ee82674..8949de8787fa7 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -22,7 +22,7 @@ shard-level stats as in the <>. 
[source,js] -------------------------------------------------- -PUT /follower_index/_ccr/follow +PUT /follower_index/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "remote_cluster", "leader_index" : "leader_index" diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 1af236f7d86fb..7c59b8628052f 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -230,7 +230,7 @@ cluster. [source,js] -------------------------------------------------- -PUT /server-metrics-copy/_ccr/follow +PUT /server-metrics-copy/_ccr/follow?wait_for_active_shards=1 { "remote_cluster" : "leader", "leader_index" : "server-metrics" diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index 257b88289d87a..e8a681567d622 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -372,14 +372,6 @@ the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 http status code). -WARNING: External versioning supports the value 0 as a valid version number. -This allows the version to be in sync with an external versioning system -where version numbers start from zero instead of one. It has the side effect -that documents with version number equal to zero can neither be updated -using the <> nor be deleted -using the <> as long as their -version number is equal to zero. - A nice side effect is that there is no need to maintain strict ordering of async indexing operations executed as a result of changes to a source database, as long as version numbers from the source database are used. 
diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 42840b1b0a5ec..64c0f67bc722c 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -32,7 +32,7 @@ Now, we can execute a script that would increment the counter: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : { "source": "ctx._source.counter += params.count", @@ -51,7 +51,7 @@ will still add it, since it's a list): [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : { "source": "ctx._source.tags.add(params.tag)", @@ -73,7 +73,7 @@ remove only one occurrence of it: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : { "source": "if (ctx._source.tags.contains(params.tag)) { ctx._source.tags.remove(ctx._source.tags.indexOf(params.tag)) }", @@ -95,7 +95,7 @@ We can also add a new field to the document: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : "ctx._source.new_field = 'value_of_new_field'" } @@ -107,7 +107,7 @@ Or remove a field from the document: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : "ctx._source.remove('new_field')" } @@ -121,7 +121,7 @@ the doc if the `tags` field contain `green`, otherwise it does nothing [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : { "source": "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'none' }", @@ -148,7 +148,7 @@ existing document: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "doc" : { "name" : "new_name" @@ -169,7 +169,7 @@ By default updates that don't 
change anything detect that they don't change anyt [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "doc" : { "name" : "new_name" @@ -204,7 +204,7 @@ You can disable this behavior by setting "detect_noop": false like this: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "doc" : { "name" : "new_name" @@ -225,7 +225,7 @@ will be inserted as a new document. If the document does exist, then the [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "script" : { "source": "ctx._source.counter += params.count", @@ -251,7 +251,7 @@ or not -- i.e. the script handles initializing the document instead of the [source,js] -------------------------------------------------- -POST sessions/session/dh3sgudg8gsrgl/_update +POST sessions/_update/dh3sgudg8gsrgl { "scripted_upsert":true, "script" : { @@ -280,7 +280,7 @@ value: [source,js] -------------------------------------------------- -POST test/_doc/1/_update +POST test/_update/1 { "doc" : { "name" : "new_name" diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d8656f7ac4c25..bee2ae5194477 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -65,9 +65,7 @@ A type used to be a logical category/partition of your index to allow you to sto [float] === Document -A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. - -Within an index/type, you can store as many documents as you want. 
Note that although a document physically resides in an index, a document actually must be indexed/assigned to a type inside an index. +A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format. Within an index, you can store as many documents as you want. [[getting-started-shards-and-replicas]] [float] @@ -496,7 +494,7 @@ If we study the above commands carefully, we can actually see a pattern of how w [source,js] -------------------------------------------------- - /// + /// -------------------------------------------------- // NOTCONSOLE @@ -572,7 +570,7 @@ This example shows how to update our previous document (ID of 1) by changing the [source,js] -------------------------------------------------- -POST /customer/_doc/1/_update?pretty +POST /customer/_update/1?pretty { "doc": { "name": "Jane Doe" } } @@ -584,7 +582,7 @@ This example shows how to update our previous document (ID of 1) by changing the [source,js] -------------------------------------------------- -POST /customer/_doc/1/_update?pretty +POST /customer/_update/1?pretty { "doc": { "name": "Jane Doe", "age": 20 } } @@ -596,7 +594,7 @@ Updates can also be performed by using simple scripts. This example uses a scrip [source,js] -------------------------------------------------- -POST /customer/_doc/1/_update?pretty +POST /customer/_update/1?pretty { "script" : "ctx._source.age += 5" } @@ -719,7 +717,7 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12 // TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/] // TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ _cat] -Which means that we just successfully bulk indexed 1000 documents into the bank index (under the `_doc` type). 
+Which means that we just successfully bulk indexed 1000 documents into the bank index. [[getting-started-search-API]] === The Search API diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index ee5ee4b4fe664..b9066a4c7af49 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -534,3 +534,77 @@ PUT index/_doc/1 The <>, <>, <> and <> APIs will continue to return a `_type` key in the response in 7.0, but it is considered deprecated and will be removed in 8.0. + +[float] +=== Index templates + +It is recommended to make index templates typeless before upgrading to 7.0 by +re-adding them with `include_type_name` set to `false`. + +In case typeless templates are used with typed index creation calls or typed +templates are used with typeless index creation calls, the template will still +be applied but the index creation call decides whether there should be a type +or not. For instance in the below example, `index-1-01` will have a type in +spite of the fact that it matches a template that is typeless, and `index-2-01` +will be typeless in spite of the fact that it matches a template that defines +a type. Both `index-1-01` and `index-2-01` will inherit the `foo` field from +the template that they match. 
+ [source,js] +-------------------------------------------------- +PUT _template/template1 +{ + "index_patterns":[ "index-1-*" ], + "mappings": { + "properties": { + "foo": { + "type": "keyword" + } + } + } +} + +PUT _template/template2?include_type_name=true +{ + "index_patterns":[ "index-2-*" ], + "mappings": { + "type": { + "properties": { + "foo": { + "type": "keyword" + } + } + } + } +} + +PUT index-1-01?include_type_name=true +{ + "mappings": { + "type": { + "properties": { + "bar": { + "type": "long" + } + } + } + } +} + +PUT index-2-01 +{ + "mappings": { + "properties": { + "bar": { + "type": "long" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +In case of implicit index creation, because of documents that get indexed in +an index that doesn't exist yet, the template is always honored. This is +usually not a problem due to the fact that typeless index calls work on typed +indices. diff --git a/docs/reference/migration/migrate_7_0/api.asciidoc b/docs/reference/migration/migrate_7_0/api.asciidoc index 3972be43685cc..bb151edb778e2 100644 --- a/docs/reference/migration/migrate_7_0/api.asciidoc +++ b/docs/reference/migration/migrate_7_0/api.asciidoc @@ -126,3 +126,15 @@ removed. The `_termvector` endpoint was deprecated in 2.0 and has now been removed. The endpoint `_termvectors` (plural) should be used instead. + +[float] +==== When {security-features} are enabled, index monitoring APIs over restricted indices are not authorized implicitly anymore + +Restricted indices (currently only `.security-6` and `.security`) are special internal +indices that require setting the `allow_restricted_indices` flag on every index +permission that covers them. If this flag is `false` (default) the permission +will not cover these and actions against them will not be authorized. +However, the monitoring APIs were the only exception to this rule. 
This exception +has been forfeited and index monitoring privileges have to be granted explicitly, +using the `allow_restricted_indices` flag on the permission (as any other index +privilege). diff --git a/docs/reference/migration/migrate_7_0/search.asciidoc b/docs/reference/migration/migrate_7_0/search.asciidoc index 67adf9363406c..0f3dcf9771c3d 100644 --- a/docs/reference/migration/migrate_7_0/search.asciidoc +++ b/docs/reference/migration/migrate_7_0/search.asciidoc @@ -122,6 +122,16 @@ using the "all fields" mode ("default_field": "*") or other fieldname expansions Search requests with extra content after the main object will no longer be accepted by the `_search` endpoint. A parsing exception will be thrown instead. +[float] +==== Doc-value fields default format + +The format of doc-value fields is changing to be the same as what could be +obtained in 6.x with the special `use_field_mapping` format. This is mostly a +change for date fields, which are now formatted based on the format that is +configured in the mappings by default. This behavior can be changed by +specifying a <> within the doc-value +field. + [float] ==== Context Completion Suggester diff --git a/docs/reference/ml/apis/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc index 6cb0dc6ba4093..7933dea85ce0a 100644 --- a/docs/reference/ml/apis/ml-api.asciidoc +++ b/docs/reference/ml/apis/ml-api.asciidoc @@ -72,7 +72,7 @@ machine learning APIs and in advanced job configuration options in Kibana. [float] [[ml-api-file-structure-endpoint]] -=== File Structure +=== File structure * <> @@ -84,10 +84,16 @@ machine learning APIs and in advanced job configuration options in Kibana. 
[float] [[ml-api-delete-expired-data-endpoint]] -=== Delete Expired Data +=== Delete expired data * <> +[float] +[[ml-set-upgrade-mode-endpoint]] +=== Set upgrade mode + +* <> + //ADD include::post-calendar-event.asciidoc[] include::put-calendar-job.asciidoc[] @@ -137,7 +143,8 @@ include::post-data.asciidoc[] include::preview-datafeed.asciidoc[] //REVERT include::revert-snapshot.asciidoc[] -//START/STOP +//SET/START/STOP +include::set-upgrade-mode.asciidoc[] include::start-datafeed.asciidoc[] include::stop-datafeed.asciidoc[] //UPDATE diff --git a/docs/reference/ml/apis/set-upgrade-mode.asciidoc b/docs/reference/ml/apis/set-upgrade-mode.asciidoc new file mode 100644 index 0000000000000..5434d70d4e61e --- /dev/null +++ b/docs/reference/ml/apis/set-upgrade-mode.asciidoc @@ -0,0 +1,103 @@ +[role="xpack"] +[testenv="platinum"] +[[ml-set-upgrade-mode]] +=== Set upgrade mode API +++++ +Set upgrade mode +++++ + +Sets a cluster wide upgrade_mode setting that prepares {ml} indices for an +upgrade. + +==== Request +////////////////////////// + +[source,js] +-------------------------------------------------- +POST /_ml/set_upgrade_mode?enabled=false&timeout=10m +-------------------------------------------------- +// CONSOLE +// TEST +// TEARDOWN + +////////////////////////// + + +`POST _ml/set_upgrade_mode` + +==== Description + +When upgrading your cluster, in some circumstances you must restart your nodes and +reindex your {ml} indices. In those circumstances, there must be no {ml} jobs running. +You can close the {ml} jobs, do the upgrade, then open all the jobs again. +Alternatively, you can use this API to temporarily halt tasks associated +with the jobs and {dfeeds} and prevent new jobs from opening. You can also use this +API during upgrades that do not require you to reindex your {ml} indices, +though stopping jobs is not a requirement in that case. + +For more information, see {stack-ref}/upgrading-elastic-stack.html[Upgrading the {stack}]. 
+ + +When `enabled=true` this API temporarily halts all job and {dfeed} tasks and +prohibits new job and {dfeed} tasks from starting. + +Subsequently, you can call the API with the enabled parameter set to false, +which causes {ml} jobs and {dfeeds} to return to their desired states. + +You can see the current value for the `upgrade_mode` setting by using the +<>. + +IMPORTANT: No new {ml} jobs can be opened while the `upgrade_mode` setting is +`true`. + +==== Query Parameters + +`enabled`:: + (boolean) When `true`, this enables `upgrade_mode`. Defaults to `false` + +`timeout`:: + (time) The time to wait for the request to be completed. + The default value is 30 seconds. + +==== Authorization + +You must have `manage_ml`, or `manage` cluster privileges to use this API. +For more information, see +{stack-ov}/security-privileges.html[Security privileges]. + + +==== Examples + +The following example enables `upgrade_mode` for the cluster: + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=true&timeout=10m +-------------------------------------------------- +// CONSOLE +// TEST + +When the call is successful, an acknowledged response is returned. For example: + +[source,js] +---- +{ + "acknowledged": true +} +---- +// TESTRESPONSE + +The acknowledged response will only be returned once all {ml} jobs and {dfeeds} have +finished writing to the {ml} internal indices. This means it is safe to reindex those +internal indices without causing failures. You must wait for the acknowledged +response before reindexing to ensure that all writes are completed. + +When the upgrade is complete, you must set `upgrade_mode` to `false` for +{ml} jobs to start running again. 
For example: + +[source,js] +-------------------------------------------------- +POST _ml/set_upgrade_mode?enabled=false&timeout=10m +-------------------------------------------------- +// CONSOLE +// TEST diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index f02919cb39660..382b4ab78ff77 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -63,6 +63,11 @@ Example response: "expiry_date_in_millis" : 1542665112332 }, "features" : { + "ccr" : { + "description" : "Cross Cluster Replication", + "available" : true, + "enabled" : true + }, "graph" : { "description" : "Graph Data Exploration for the Elastic Stack", "available" : true, diff --git a/docs/reference/search/request/docvalue-fields.asciidoc b/docs/reference/search/request/docvalue-fields.asciidoc index bcfcb20d1d53b..6697b5bb3e383 100644 --- a/docs/reference/search/request/docvalue-fields.asciidoc +++ b/docs/reference/search/request/docvalue-fields.asciidoc @@ -12,9 +12,9 @@ GET /_search "match_all": {} }, "docvalue_fields" : [ + "my_ip_field", <1> { - "field": "my_ip_field", <1> - "format": "use_field_mapping" <2> + "field": "my_keyword_field" <2> }, { "field": "my_date_field", @@ -25,10 +25,10 @@ GET /_search -------------------------------------------------- // CONSOLE <1> the name of the field -<2> the special `use_field_mapping` format tells Elasticsearch to use the format from the mapping -<3> date fields may use a custom format +<2> an object notation is supported as well +<3> the object notation allows to specify a custom format -Doc value fields can work on fields that are not stored. 
+Doc value fields can work on fields that have doc-values enabled, regardless of whether they are stored `*` can be used as a wild card, for example: @@ -41,8 +41,8 @@ GET /_search }, "docvalue_fields" : [ { - "field": "*field", <1> - "format": "use_field_mapping" <2> + "field": "*_date_field", <1> + "format": "epoch_millis" <2> } ] } @@ -62,9 +62,8 @@ While most fields do not support custom formats, some of them do: - <> fields can take any <>. - <> fields accept a https://docs.oracle.com/javase/8/docs/api/java/text/DecimalFormat.html[DecimalFormat pattern]. -All fields support the special `use_field_mapping` format, which tells -Elasticsearch to use the mappings to figure out a default format. +By default fields are formatted based on a sensible configuration that depends +on their mappings: `long`, `double` and other numeric fields are formatted as +numbers, `keyword` fields are formatted as strings, `date` fields are formatted +with the configured `date` format, etc. -NOTE: The default is currently to return the same output as -<>. However it will change in 7.0 -to behave as if the `use_field_mapping` format was provided. 
diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index b287b1609703e..7774e34c2c00d 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -246,10 +246,7 @@ POST test/_search "inner_hits": { "_source" : false, "docvalue_fields" : [ - { - "field": "comments.text.keyword", - "format": "use_field_mapping" - } + "comments.text.keyword" ] } } diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index c5046f51ba112..97b4762338936 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -150,17 +150,17 @@ The Debian package for Elasticsearch v{version} can be downloaded from the websi ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.deb -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.deb.sha512 -shasum -a 512 -c elasticsearch-{version}.deb.sha512 <1> -sudo dpkg -i elasticsearch-{version}.deb +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-amd64.deb +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-amd64.deb.sha512 +shasum -a 512 -c elasticsearch-{version}-amd64.deb.sha512 <1> +sudo dpkg -i elasticsearch-{version}-amd64.deb -------------------------------------------- <1> Compares the SHA of the downloaded Debian package and the published checksum, which should output - `elasticsearch-{version}.deb: OK`. + `elasticsearch-{version}-amd64.deb: OK`. 
Alternatively, you can download the following package, which contains only features that are available under the Apache 2.0 license: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.deb +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-amd64.deb endif::[] diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 11add03573db1..a450e202b6896 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -135,17 +135,17 @@ The RPM for Elasticsearch v{version} can be downloaded from the website and inst ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.rpm -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.rpm.sha512 -shasum -a 512 -c elasticsearch-{version}.rpm.sha512 <1> -sudo rpm --install elasticsearch-{version}.rpm +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-x86_64.rpm +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-x86_64.rpm.sha512 +shasum -a 512 -c elasticsearch-{version}-x86_64.rpm.sha512 <1> +sudo rpm --install elasticsearch-{version}-x86_64.rpm -------------------------------------------- <1> Compares the SHA of the downloaded RPM and the published checksum, which should output - `elasticsearch-{version}.rpm: OK`. + `elasticsearch-{version}-x86_64.rpm: OK`. 
Alternatively, you can download the following package, which contains only features that are available under the Apache 2.0 license: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.rpm +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-x86_64.rpm endif::[] diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index 735ca5b4ea0d1..d532438103754 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -32,19 +32,19 @@ The `.zip` archive for Elasticsearch v{version} can be downloaded and installed ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip.sha512 -shasum -a 512 -c elasticsearch-{version}.zip.sha512 <1> -unzip elasticsearch-{version}.zip +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip.sha512 +shasum -a 512 -c elasticsearch-{version}-windows-x86_64.zip.sha512 <1> +unzip elasticsearch-{version}-windows-x86_64.zip cd elasticsearch-{version}/ <2> -------------------------------------------- <1> Compares the SHA of the downloaded `.zip` archive and the published checksum, which should output - `elasticsearch-{version}.zip: OK`. + `elasticsearch-{version}-windows-x86_64.zip: OK`. <2> This directory is known as `$ES_HOME`. 
Alternatively, you can download the following package, which contains only features that are available under the Apache 2.0 license: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-windows-x86_64.zip endif::[] @@ -64,19 +64,19 @@ The `.tar.gz` archive for Elasticsearch v{version} can be downloaded and install ["source","sh",subs="attributes"] -------------------------------------------- -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz -wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.tar.gz.sha512 -shasum -a 512 -c elasticsearch-{version}.tar.gz.sha512 <1> -tar -xzf elasticsearch-{version}.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz +wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-linux-x86_64.tar.gz.sha512 +shasum -a 512 -c elasticsearch-{version}-linux-x86_64.tar.gz.sha512 <1> +tar -xzf elasticsearch-{version}-linux-x86_64.tar.gz cd elasticsearch-{version}/ <2> -------------------------------------------- <1> Compares the SHA of the downloaded `.tar.gz` archive and the published checksum, which should output - `elasticsearch-{version}.tar.gz: OK`. + `elasticsearch-{version}-linux-x86_64.tar.gz: OK`. <2> This directory is known as `$ES_HOME`. 
Alternatively, you can download the following package, which includes only Apache 2.0 licensed code: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.tar.gz +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-linux-x86_64.tar.gz endif::[] diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 254fb63f6157d..967b449bc972b 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -31,11 +31,11 @@ endif::[] ifeval::["{release-state}"!="unreleased"] -Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip +Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip Alternatively, you can download the following package, which contains only features that are available under the Apache 2.0 license: -https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip +https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}-windows-x86_64.zip Unzip it with your favourite unzip tool. This will create a folder called +elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. 
In a terminal diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc index 2bb7bfae4d225..de9d8adbeab0b 100644 --- a/docs/reference/sql/endpoints/translate.asciidoc +++ b/docs/reference/sql/endpoints/translate.asciidoc @@ -27,8 +27,7 @@ Which returns: "size" : 10, "docvalue_fields" : [ { - "field": "page_count", - "format": "use_field_mapping" + "field": "page_count" }, { "field": "release_date", diff --git a/libs/geo/src/main/eclipse-build.gradle b/libs/geo/src/main/eclipse-build.gradle new file mode 100644 index 0000000000000..4864452846b13 --- /dev/null +++ b/libs/geo/src/main/eclipse-build.gradle @@ -0,0 +1,3 @@ + +// this is just shell gradle file for eclipse to have separate projects for geo src and tests +apply from: '../../build.gradle' diff --git a/libs/geo/src/test/eclipse-build.gradle b/libs/geo/src/test/eclipse-build.gradle new file mode 100644 index 0000000000000..8dc16debdde32 --- /dev/null +++ b/libs/geo/src/test/eclipse-build.gradle @@ -0,0 +1,6 @@ + +// this is just shell gradle file for eclipse to have separate projects for geo src and tests +apply from: '../../build.gradle' +dependencies { + testCompile project(':libs:elasticsearch-geo') +} \ No newline at end of file diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java index 4b76d837b9515..f28e71134bd85 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java @@ -49,8 +49,7 @@ public RatedSearchHit(SearchHit searchHit, OptionalInt rating) { } RatedSearchHit(StreamInput in) throws IOException { - this(SearchHit.readSearchHit(in), - in.readBoolean() == true ? OptionalInt.of(in.readVInt()) : OptionalInt.empty()); + this(new SearchHit(in), in.readBoolean() == true ? 
OptionalInt.of(in.readVInt()) : OptionalInt.empty()); } @Override diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java index e55dab1c38f64..9617d2a3774da 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -35,7 +35,6 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.ParentTaskAssigningClient; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -50,6 +49,7 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.UpdateScript; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -88,7 +88,6 @@ public abstract class AbstractAsyncBulkByScrollActionrequest variables all representing child @@ -111,9 +110,10 @@ public abstract class AbstractAsyncBulkByScrollAction, ScrollableHitSource.Hit, RequestWrapper> scriptApplier; - public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, - ThreadPool threadPool, Request mainRequest, ScriptService scriptService, ClusterState clusterState, - ActionListener listener) { + public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, boolean needsSourceDocumentVersions, + boolean needsSourceDocumentSeqNoAndPrimaryTerm, Logger logger, ParentTaskAssigningClient client, + ThreadPool threadPool, Request mainRequest, ScriptService 
scriptService, + ActionListener listener) { this.task = task; if (!task.isWorker()) { @@ -125,7 +125,6 @@ public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, Par this.client = client; this.threadPool = threadPool; this.scriptService = scriptService; - this.clusterState = clusterState; this.mainRequest = mainRequest; this.listener = listener; BackoffPolicy backoffPolicy = buildBackoffPolicy(); @@ -137,11 +136,13 @@ public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, Par * them and if we add _doc as the first sort by default then sorts will never work.... So we add it here, only if there isn't * another sort. */ - List> sorts = mainRequest.getSearchRequest().source().sorts(); + final SearchSourceBuilder sourceBuilder = mainRequest.getSearchRequest().source(); + List> sorts = sourceBuilder.sorts(); if (sorts == null || sorts.isEmpty()) { - mainRequest.getSearchRequest().source().sort(fieldSort("_doc")); + sourceBuilder.sort(fieldSort("_doc")); } - mainRequest.getSearchRequest().source().version(needsSourceDocumentVersions()); + sourceBuilder.version(needsSourceDocumentVersions); + sourceBuilder.seqNoAndPrimaryTerm(needsSourceDocumentSeqNoAndPrimaryTerm); } /** @@ -153,12 +154,7 @@ public BiFunction, ScrollableHitSource.Hit, RequestWrapper> // The default script applier executes a no-op return (request, searchHit) -> request; } - - /** - * Does this operation need the versions of the source documents? - */ - protected abstract boolean needsSourceDocumentVersions(); - + /** * Build the {@link RequestWrapper} for a single search hit. This shouldn't handle * metadata or scripting. 
That will be handled by copyMetadata and diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java index c86911649ac34..5026ec0f79c57 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/AsyncDeleteByQueryAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.client.ParentTaskAssigningClient; @@ -31,20 +32,20 @@ * Implementation of delete-by-query using scrolling and bulk. */ public class AsyncDeleteByQueryAction extends AbstractAsyncBulkByScrollAction { + private final boolean useSeqNoForCAS; + public AsyncDeleteByQueryAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, DeleteByQueryRequest request, ScriptService scriptService, ClusterState clusterState, ActionListener listener) { - super(task, logger, client, threadPool, request, scriptService, clusterState, listener); + super(task, + // not all nodes support sequence number powered optimistic concurrency control, we fall back to version + clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0) == false, + // all nodes support sequence number powered optimistic concurrency control and we can use it + clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0), + logger, client, threadPool, request, scriptService, listener); + useSeqNoForCAS = clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0); } - @Override - protected boolean needsSourceDocumentVersions() { - /* - * We always need the version of the source document so we can report a version conflict if we 
try to delete it and it has been - * changed. - */ - return true; - } @Override protected boolean accept(ScrollableHitSource.Hit doc) { @@ -59,7 +60,12 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc delete.index(doc.getIndex()); delete.type(doc.getType()); delete.id(doc.getId()); - delete.version(doc.getVersion()); + if (useSeqNoForCAS) { + delete.setIfSeqNo(doc.getSeqNo()); + delete.setIfPrimaryTerm(doc.getPrimaryTerm()); + } else { + delete.version(doc.getVersion()); + } return wrap(delete); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java index 0acc9a7b37dbc..3073b17d5bf95 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportReindexAction.java @@ -259,16 +259,13 @@ static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction listener) { - super(task, logger, client, threadPool, request, scriptService, clusterState, listener); - } - - @Override - protected boolean needsSourceDocumentVersions() { - /* - * We only need the source version if we're going to use it when write and we only do that when the destination request uses - * external versioning. - */ - return mainRequest.getDestination().versionType() != VersionType.INTERNAL; + super(task, + /* + * We only need the source version if we're going to use it when write and we only do that when the destination request uses + * external versioning. 
+ */ + request.getDestination().versionType() != VersionType.INTERNAL, + false, logger, client, threadPool, request, scriptService, listener); } @Override diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index fc86583832619..9ed7744f8a27a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.reindex; import org.apache.logging.log4j.Logger; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.ActionFilters; @@ -81,19 +82,19 @@ protected void doExecute(Task task, UpdateByQueryRequest request, ActionListener * Simple implementation of update-by-query using scrolling and bulk. */ static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction { + + private final boolean useSeqNoForCAS; + AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client, ThreadPool threadPool, UpdateByQueryRequest request, ScriptService scriptService, ClusterState clusterState, ActionListener listener) { - super(task, logger, client, threadPool, request, scriptService, clusterState, listener); - } - - @Override - protected boolean needsSourceDocumentVersions() { - /* - * We always need the version of the source document so we can report a version conflict if we try to delete it and it has - * been changed. 
- */ - return true; + super(task, + // not all nodes support sequence number powered optimistic concurrency control, we fall back to version + clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0) == false, + // all nodes support sequence number powered optimistic concurrency control and we can use it + clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0), + logger, client, threadPool, request, scriptService, listener); + useSeqNoForCAS = clusterState.nodes().getMinNodeVersion().onOrAfter(Version.V_6_7_0); } @Override @@ -112,8 +113,13 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) index.type(doc.getType()); index.id(doc.getId()); index.source(doc.getSource(), doc.getXContentType()); - index.versionType(VersionType.INTERNAL); - index.version(doc.getVersion()); + if (useSeqNoForCAS) { + index.setIfSeqNo(doc.getSeqNo()); + index.setIfPrimaryTerm(doc.getPrimaryTerm()); + } else { + index.versionType(VersionType.INTERNAL); + index.version(doc.getVersion()); + } index.setPipeline(mainRequest.getPipeline()); return wrap(index); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java index c7e814237d6b9..ccd177b9174ab 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteResponseParsers.java @@ -21,13 +21,9 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.index.reindex.ScrollableHitSource.BasicHit; -import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; -import org.elasticsearch.index.reindex.ScrollableHitSource.Response; -import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import 
org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -37,6 +33,10 @@ import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.reindex.ScrollableHitSource.BasicHit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; +import org.elasticsearch.index.reindex.ScrollableHitSource.Response; +import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; import org.elasticsearch.search.SearchHits; import java.io.IOException; @@ -90,6 +90,8 @@ private RemoteResponseParsers() {} ParseField routingField = new ParseField("_routing"); ParseField ttlField = new ParseField("_ttl"); ParseField parentField = new ParseField("_parent"); + ParseField seqNoField = new ParseField("_seq_no"); + ParseField primaryTermField = new ParseField("_primary_term"); HIT_PARSER.declareString(BasicHit::setRouting, routingField); // Pre-2.0.0 routing come back in "fields" class Fields { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index e6aee3596b1bd..cd23bf03add59 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -677,13 +677,8 @@ private void simulateScrollResponse(DummyAsyncBulkByScrollAction action, TimeVal private class DummyAsyncBulkByScrollAction extends AbstractAsyncBulkByScrollAction { DummyAsyncBulkByScrollAction() { - 
super(testTask, AsyncBulkByScrollActionTests.this.logger, new ParentTaskAssigningClient(client, localNode, testTask), - client.threadPool(), testRequest, null, null, listener); - } - - @Override - protected boolean needsSourceDocumentVersions() { - return randomBoolean(); + super(testTask, randomBoolean(), randomBoolean(), AsyncBulkByScrollActionTests.this.logger, + new ParentTaskAssigningClient(client, localNode, testTask), client.threadPool(), testRequest, null, listener); } @Override diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java index 3ce8884ff92fb..95ee787f13f63 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java @@ -19,12 +19,14 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; public class UpdateByQueryMetadataTests - extends AbstractAsyncBulkByScrollActionMetadataTestCase { - public void testRoutingIsCopied() throws Exception { + extends AbstractAsyncBulkByScrollActionMetadataTestCase { + + public void testRoutingIsCopied() { IndexRequest index = new IndexRequest(); action().copyMetadata(AbstractAsyncBulkByScrollAction.wrap(index), doc().setRouting("foo")); assertEquals("foo", index.routing()); @@ -43,12 +45,12 @@ protected UpdateByQueryRequest request() { private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction { TestAction() { super(UpdateByQueryMetadataTests.this.task, UpdateByQueryMetadataTests.this.logger, null, - UpdateByQueryMetadataTests.this.threadPool, request(), null, null, listener()); + 
UpdateByQueryMetadataTests.this.threadPool, request(), null, ClusterState.EMPTY_STATE, listener()); } @Override public AbstractAsyncBulkByScrollAction.RequestWrapper copyMetadata(AbstractAsyncBulkByScrollAction.RequestWrapper request, - Hit doc) { + Hit doc) { return super.copyMetadata(request, doc); } } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 90b78b9e1080d..0eb2a1cfb7d0a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.script.ScriptService; import java.util.Date; @@ -54,6 +55,6 @@ protected UpdateByQueryRequest request() { @Override protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action(ScriptService scriptService, UpdateByQueryRequest request) { return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService, - null, listener()); + ClusterState.EMPTY_STATE, listener()); } } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index 9f9a99f0f5c2e..7ce6b86c6b525 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -89,7 +89,11 @@ - is_false: response.task --- -"Response for version conflict": +"Response for version conflict (version powered)": + - skip: + version: "6.7.0 - " + reason: reindex moved to rely on sequence numbers for concurrency control + - do: indices.create: 
index: test @@ -143,6 +147,64 @@ - match: {count: 1} +--- +"Response for version conflict (seq no powered)": + - skip: + version: " - 6.6.99" + reason: reindex moved to rely on sequence numbers for concurrency control + + - do: + indices.create: + index: test + body: + settings: + index.refresh_interval: -1 + - do: + index: + index: test + type: _doc + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + # Creates a new version for reindex to miss on scan. + - do: + index: + index: test + type: _doc + id: 1 + body: { "text": "test2" } + + - do: + catch: conflict + delete_by_query: + index: test + body: + query: + match_all: {} + + - match: {deleted: 0} + - match: {version_conflicts: 1} + - match: {batches: 1} + - match: {failures.0.index: test} + - match: {failures.0.type: _doc} + - match: {failures.0.id: "1"} + - match: {failures.0.status: 409} + - match: {failures.0.cause.type: version_conflict_engine_exception} + - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.shard: /\d+/} + - match: {failures.0.cause.index: test} + - gte: { took: 0 } + + - do: + indices.refresh: {} + + - do: + count: + index: test + + - match: {count: 1} + --- "Response for version conflict with conflicts=proceed": - do: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yml index 1e2afacca148e..8448b229b258b 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/40_versioning.yml @@ -1,5 +1,9 @@ --- "delete_by_query fails to delete documents with version number equal to zero": + - skip: + version: "6.7.0 - " + reason: reindex moved to rely on sequence numbers for concurrency control + - do: index: index: index1 diff --git 
a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index d3881035d924f..eb76681c0b0d1 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -74,7 +74,10 @@ - is_false: response.deleted --- -"Response for version conflict": +"Response for version conflict (version powered)": + - skip: + version: "6.7.0 - " + reason: reindex moved to rely on sequence numbers for concurrency control - do: indices.create: index: test @@ -115,6 +118,50 @@ - match: {failures.0.cause.index: test} - gte: { took: 0 } +--- +"Response for version conflict (seq no powered)": + - skip: + version: " - 6.6.99" + reason: reindex moved to rely on sequence numbers for concurrency control + - do: + indices.create: + index: test + body: + settings: + index.refresh_interval: -1 + - do: + index: + index: test + type: _doc + id: 1 + body: { "text": "test" } + - do: + indices.refresh: {} + # Creates a new version for reindex to miss on scan. 
+ - do: + index: + index: test + type: _doc + id: 1 + body: { "text": "test2" } + + - do: + catch: conflict + update_by_query: + index: test + - match: {updated: 0} + - match: {version_conflicts: 1} + - match: {batches: 1} + - match: {failures.0.index: test} + - match: {failures.0.type: _doc} + - match: {failures.0.id: "1"} + - match: {failures.0.status: 409} + - match: {failures.0.cause.type: version_conflict_engine_exception} + - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.shard: /\d+/} + - match: {failures.0.cause.index: test} + - gte: { took: 0 } + --- "Response for version conflict with conflicts=proceed": - do: diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yml index 7d2083f925b99..d14691be53b83 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/40_versioning.yml @@ -24,6 +24,9 @@ --- "update_by_query fails to update documents with version number equal to zero": + - skip: + version: "6.7.0 - " + reason: reindex moved to rely on sequence numbers for concurrency control - do: index: index: index1 diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java new file mode 100644 index 0000000000000..f6dfb692d1de2 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicCredentials.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.auth.AWSCredentials; + +import java.util.Objects; + +class S3BasicCredentials implements AWSCredentials { + + private final String accessKey; + + private final String secretKey; + + S3BasicCredentials(String accessKey, String secretKey) { + this.accessKey = accessKey; + this.secretKey = secretKey; + } + + @Override + public final String getAWSAccessKeyId() { + return accessKey; + } + + @Override + public final String getAWSSecretKey() { + return secretKey; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final S3BasicCredentials that = (S3BasicCredentials) o; + return accessKey.equals(that.accessKey) && secretKey.equals(that.secretKey); + } + + @Override + public int hashCode() { + return Objects.hash(accessKey, secretKey); + } +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java new file mode 100644 index 0000000000000..4057403aa2c0f --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BasicSessionCredentials.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.auth.AWSSessionCredentials; + +import java.util.Objects; + +final class S3BasicSessionCredentials extends S3BasicCredentials implements AWSSessionCredentials { + + private final String sessionToken; + + S3BasicSessionCredentials(String accessKey, String secretKey, String sessionToken) { + super(accessKey, secretKey); + this.sessionToken = sessionToken; + } + + @Override + public String getSessionToken() { + return sessionToken; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final S3BasicSessionCredentials that = (S3BasicSessionCredentials) o; + return sessionToken.equals(that.sessionToken) && + getAWSAccessKeyId().equals(that.getAWSAccessKeyId()) && + getAWSSecretKey().equals(that.getAWSSecretKey()); + } + + @Override + public int hashCode() { + return Objects.hash(sessionToken, getAWSAccessKeyId(), getAWSSecretKey()); + } +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 
27f2b305abd32..d4df4094fcf92 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -25,6 +25,7 @@ import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -39,8 +40,6 @@ class S3BlobStore implements BlobStore { private final S3Service service; - private final String clientName; - private final String bucket; private final ByteSizeValue bufferSize; @@ -51,15 +50,18 @@ class S3BlobStore implements BlobStore { private final StorageClass storageClass; - S3BlobStore(S3Service service, String clientName, String bucket, boolean serverSideEncryption, - ByteSizeValue bufferSize, String cannedACL, String storageClass) { + private final RepositoryMetaData repositoryMetaData; + + S3BlobStore(S3Service service, String bucket, boolean serverSideEncryption, + ByteSizeValue bufferSize, String cannedACL, String storageClass, + RepositoryMetaData repositoryMetaData) { this.service = service; - this.clientName = clientName; this.bucket = bucket; this.serverSideEncryption = serverSideEncryption; this.bufferSize = bufferSize; this.cannedACL = initCannedACL(cannedACL); this.storageClass = initStorageClass(storageClass); + this.repositoryMetaData = repositoryMetaData; } @Override @@ -68,7 +70,7 @@ public String toString() { } public AmazonS3Reference clientReference() { - return service.client(clientName); + return service.client(repositoryMetaData); } public String bucket() { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java index 58fca161415a4..ea45fbaf93dd3 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -21,9 +21,6 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.BasicSessionCredentials; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; @@ -36,6 +33,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; /** @@ -46,6 +44,9 @@ final class S3ClientSettings { // prefix for s3 client settings private static final String PREFIX = "s3.client."; + /** Placeholder client name for normalizing client settings in the repository settings. */ + private static final String PLACEHOLDER_CLIENT = "placeholder"; + /** The access key (ie login id) for connecting to s3. */ static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", key -> SecureSetting.secureString(key, null)); @@ -95,7 +96,7 @@ final class S3ClientSettings { key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope)); /** Credentials to authenticate with s3. */ - final AWSCredentials credentials; + final S3BasicCredentials credentials; /** The s3 endpoint the client should talk to, or empty string to use the default. */ final String endpoint; @@ -126,7 +127,7 @@ final class S3ClientSettings { /** Whether the s3 client should use an exponential backoff retry policy. 
*/ final boolean throttleRetries; - protected S3ClientSettings(AWSCredentials credentials, String endpoint, Protocol protocol, + private S3ClientSettings(S3BasicCredentials credentials, String endpoint, Protocol protocol, String proxyHost, int proxyPort, String proxyUsername, String proxyPassword, int readTimeoutMillis, int maxRetries, boolean throttleRetries) { this.credentials = credentials; @@ -141,6 +142,51 @@ protected S3ClientSettings(AWSCredentials credentials, String endpoint, Protocol this.throttleRetries = throttleRetries; } + /** + * Overrides the settings in this instance with settings found in repository metadata. + * + * @param metadata RepositoryMetaData + * @return S3ClientSettings + */ + S3ClientSettings refine(RepositoryMetaData metadata) { + final Settings repoSettings = metadata.settings(); + // Normalize settings to placeholder client settings prefix so that we can use the affix settings directly + final Settings normalizedSettings = + Settings.builder().put(repoSettings).normalizePrefix(PREFIX + PLACEHOLDER_CLIENT + '.').build(); + final String newEndpoint = getRepoSettingOrDefault(ENDPOINT_SETTING, normalizedSettings, endpoint); + + final Protocol newProtocol = getRepoSettingOrDefault(PROTOCOL_SETTING, normalizedSettings, protocol); + final String newProxyHost = getRepoSettingOrDefault(PROXY_HOST_SETTING, normalizedSettings, proxyHost); + final int newProxyPort = getRepoSettingOrDefault(PROXY_PORT_SETTING, normalizedSettings, proxyPort); + final int newReadTimeoutMillis = Math.toIntExact( + getRepoSettingOrDefault(READ_TIMEOUT_SETTING, normalizedSettings, TimeValue.timeValueMillis(readTimeoutMillis)).millis()); + final int newMaxRetries = getRepoSettingOrDefault(MAX_RETRIES_SETTING, normalizedSettings, maxRetries); + final boolean newThrottleRetries = getRepoSettingOrDefault(USE_THROTTLE_RETRIES_SETTING, normalizedSettings, throttleRetries); + final S3BasicCredentials newCredentials; + if (checkDeprecatedCredentials(repoSettings)) { + 
newCredentials = loadDeprecatedCredentials(repoSettings); + } else { + newCredentials = credentials; + } + if (Objects.equals(endpoint, newEndpoint) && protocol == newProtocol && Objects.equals(proxyHost, newProxyHost) + && proxyPort == newProxyPort && newReadTimeoutMillis == readTimeoutMillis && maxRetries == newMaxRetries + && newThrottleRetries == throttleRetries && Objects.equals(credentials, newCredentials)) { + return this; + } + return new S3ClientSettings( + newCredentials, + newEndpoint, + newProtocol, + newProxyHost, + newProxyPort, + proxyUsername, + proxyPassword, + newReadTimeoutMillis, + newMaxRetries, + newThrottleRetries + ); + } + /** * Load all client settings from the given settings. * @@ -175,24 +221,24 @@ static boolean checkDeprecatedCredentials(Settings repositorySettings) { } // backcompat for reading keys out of repository settings (clusterState) - static BasicAWSCredentials loadDeprecatedCredentials(Settings repositorySettings) { + private static S3BasicCredentials loadDeprecatedCredentials(Settings repositorySettings) { assert checkDeprecatedCredentials(repositorySettings); try (SecureString key = S3Repository.ACCESS_KEY_SETTING.get(repositorySettings); SecureString secret = S3Repository.SECRET_KEY_SETTING.get(repositorySettings)) { - return new BasicAWSCredentials(key.toString(), secret.toString()); + return new S3BasicCredentials(key.toString(), secret.toString()); } } - static AWSCredentials loadCredentials(Settings settings, String clientName) { + private static S3BasicCredentials loadCredentials(Settings settings, String clientName) { try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING); SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING); SecureString sessionToken = getConfigValue(settings, clientName, SESSION_TOKEN_SETTING)) { if (accessKey.length() != 0) { if (secretKey.length() != 0) { if (sessionToken.length() != 0) { - return new 
BasicSessionCredentials(accessKey.toString(), secretKey.toString(), sessionToken.toString()); + return new S3BasicSessionCredentials(accessKey.toString(), secretKey.toString(), sessionToken.toString()); } else { - return new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + return new S3BasicCredentials(accessKey.toString(), secretKey.toString()); } } else { throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); @@ -212,34 +258,48 @@ static AWSCredentials loadCredentials(Settings settings, String clientName) { // pkg private for tests /** Parse settings for a single client. */ static S3ClientSettings getClientSettings(final Settings settings, final String clientName) { - final AWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName); - return getClientSettings(settings, clientName, credentials); - } - - static S3ClientSettings getClientSettings(final Settings settings, final String clientName, final AWSCredentials credentials) { try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING); SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) { return new S3ClientSettings( - credentials, - getConfigValue(settings, clientName, ENDPOINT_SETTING), - getConfigValue(settings, clientName, PROTOCOL_SETTING), - getConfigValue(settings, clientName, PROXY_HOST_SETTING), - getConfigValue(settings, clientName, PROXY_PORT_SETTING), - proxyUsername.toString(), - proxyPassword.toString(), - Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), - getConfigValue(settings, clientName, MAX_RETRIES_SETTING), - getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING) + S3ClientSettings.loadCredentials(settings, clientName), + getConfigValue(settings, clientName, ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROTOCOL_SETTING), + getConfigValue(settings, clientName, 
PROXY_HOST_SETTING), + getConfigValue(settings, clientName, PROXY_PORT_SETTING), + proxyUsername.toString(), + proxyPassword.toString(), + Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), + getConfigValue(settings, clientName, MAX_RETRIES_SETTING), + getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING) ); } } - static S3ClientSettings getClientSettings(final RepositoryMetaData metadata, final AWSCredentials credentials) { - final Settings.Builder builder = Settings.builder(); - for (final String key : metadata.settings().keySet()) { - builder.put(PREFIX + "provided" + "." + key, metadata.settings().get(key)); + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; } - return getClientSettings(builder.build(), "provided", credentials); + final S3ClientSettings that = (S3ClientSettings) o; + return proxyPort == that.proxyPort && + readTimeoutMillis == that.readTimeoutMillis && + maxRetries == that.maxRetries && + throttleRetries == that.throttleRetries && + Objects.equals(credentials, that.credentials) && + Objects.equals(endpoint, that.endpoint) && + protocol == that.protocol && + Objects.equals(proxyHost, that.proxyHost) && + Objects.equals(proxyUsername, that.proxyUsername) && + Objects.equals(proxyPassword, that.proxyPassword); + } + + @Override + public int hashCode() { + return Objects.hash(credentials, endpoint, protocol, proxyHost, proxyPort, proxyUsername, proxyPassword, + readTimeoutMillis, maxRetries, throttleRetries); } private static T getConfigValue(Settings settings, String clientName, @@ -248,4 +308,10 @@ private static T getConfigValue(Settings settings, String clientName, return concreteSetting.get(settings); } + private static T getRepoSettingOrDefault(Setting.AffixSetting setting, Settings normalizedSettings, T defaultValue) { + if 
(setting.getConcreteSettingForNamespace(PLACEHOLDER_CLIENT).exists(normalizedSettings)) { + return getConfigValue(normalizedSettings, PLACEHOLDER_CLIENT, setting); + } + return defaultValue; + } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index e0e34e40f3cf8..b1d29d89a59c0 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.s3; -import com.amazonaws.auth.BasicAWSCredentials; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.elasticsearch.cluster.metadata.RepositoryMetaData; @@ -148,8 +147,6 @@ class S3Repository extends BlobStoreRepository { */ static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); - private final Settings settings; - private final S3Service service; private final String bucket; @@ -168,9 +165,7 @@ class S3Repository extends BlobStoreRepository { private final String cannedACL; - private final String clientName; - - private final AmazonS3Reference reference; + private final RepositoryMetaData repositoryMetaData; /** * Constructs an s3 backed repository @@ -180,9 +175,10 @@ class S3Repository extends BlobStoreRepository { final NamedXContentRegistry namedXContentRegistry, final S3Service service) { super(metadata, settings, namedXContentRegistry); - this.settings = settings; this.service = service; + this.repositoryMetaData = metadata; + // Parse and validate the user's S3 Storage Class setting this.bucket = BUCKET_SETTING.get(metadata.settings()); if (bucket == null) { @@ -211,24 +207,10 @@ class S3Repository extends BlobStoreRepository { this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); this.cannedACL = 
CANNED_ACL_SETTING.get(metadata.settings()); - this.clientName = CLIENT_NAME.get(metadata.settings()); - - if (CLIENT_NAME.exists(metadata.settings()) && S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { - logger.warn( - "ignoring use of named client [{}] for repository [{}] as insecure credentials were specified", - clientName, - metadata.name()); - } - if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) { // provided repository settings deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + "store these in named clients and the elasticsearch keystore for secure settings."); - final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings()); - final S3ClientSettings s3ClientSettings = S3ClientSettings.getClientSettings(metadata, insecureCredentials); - this.reference = new AmazonS3Reference(service.buildClient(s3ClientSettings)); - } else { - reference = null; } logger.debug( @@ -243,21 +225,7 @@ class S3Repository extends BlobStoreRepository { @Override protected S3BlobStore createBlobStore() { - if (reference != null) { - assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name(); - return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) { - @Override - public AmazonS3Reference clientReference() { - if (reference.tryIncRef()) { - return reference; - } else { - throw new IllegalStateException("S3 client is closed"); - } - } - }; - } else { - return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); - } + return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, repositoryMetaData); } // only use for testing @@ -286,14 +254,4 @@ protected boolean isCompress() { protected ByteSizeValue chunkSize() { return chunkSize; } - - @Override - protected void doClose() { - if 
(reference != null) { - assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name(); - reference.decRef(); - } - super.doClose(); - } - } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index a5ee861d0c38b..b0c8a6198130a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -22,18 +22,20 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; -import com.amazonaws.internal.StaticCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.settings.Settings; import java.io.Closeable; import java.io.IOException; @@ -43,56 +45,96 @@ class S3Service implements Closeable { - private static final Logger logger = LogManager.getLogger(S3Service.class); - private volatile Map clientsCache = emptyMap(); - private volatile Map clientsSettings = emptyMap(); + private volatile Map clientsCache = emptyMap(); + + /** + * Client settings calculated from static configuration and settings in the keystore. 
+ */ + private volatile Map staticClientSettings = MapBuilder.newMapBuilder() + .put("default", S3ClientSettings.getClientSettings(Settings.EMPTY, "default")).immutableMap(); + + /** + * Client settings derived from those in {@link #staticClientSettings} by combining them with settings + * in the {@link RepositoryMetaData}. + */ + private volatile Map> derivedClientSettings = emptyMap(); /** * Refreshes the settings for the AmazonS3 clients and clears the cache of * existing clients. New clients will be build using these new settings. Old * clients are usable until released. On release they will be destroyed instead - * to being returned to the cache. + * of being returned to the cache. */ - public synchronized Map refreshAndClearCache(Map clientsSettings) { + public synchronized void refreshAndClearCache(Map clientsSettings) { // shutdown all unused clients // others will shutdown on their respective release releaseCachedClients(); - final Map prevSettings = this.clientsSettings; - this.clientsSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); - assert this.clientsSettings.containsKey("default") : "always at least have 'default'"; - // clients are built lazily by {@link client(String)} - return prevSettings; + this.staticClientSettings = MapBuilder.newMapBuilder(clientsSettings).immutableMap(); + derivedClientSettings = emptyMap(); + assert this.staticClientSettings.containsKey("default") : "always at least have 'default'"; + // clients are built lazily by {@link client} } /** - * Attempts to retrieve a client by name from the cache. If the client does not - * exist it will be created. + * Attempts to retrieve a client by its repository metadata and settings from the cache. + * If the client does not exist it will be created. 
*/ - public AmazonS3Reference client(String clientName) { - AmazonS3Reference clientReference = clientsCache.get(clientName); - if ((clientReference != null) && clientReference.tryIncRef()) { - return clientReference; - } - synchronized (this) { - clientReference = clientsCache.get(clientName); - if ((clientReference != null) && clientReference.tryIncRef()) { + public AmazonS3Reference client(RepositoryMetaData repositoryMetaData) { + final S3ClientSettings clientSettings = settings(repositoryMetaData); + { + final AmazonS3Reference clientReference = clientsCache.get(clientSettings); + if (clientReference != null && clientReference.tryIncRef()) { return clientReference; } - final S3ClientSettings clientSettings = clientsSettings.get(clientName); - if (clientSettings == null) { - throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " - + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + } + synchronized (this) { + final AmazonS3Reference existing = clientsCache.get(clientSettings); + if (existing != null && existing.tryIncRef()) { + return existing; } - logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint); - clientReference = new AmazonS3Reference(buildClient(clientSettings)); + final AmazonS3Reference clientReference = new AmazonS3Reference(buildClient(clientSettings)); clientReference.incRef(); - clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientName, clientReference).immutableMap(); + clientsCache = MapBuilder.newMapBuilder(clientsCache).put(clientSettings, clientReference).immutableMap(); return clientReference; } } + /** + * Either fetches {@link S3ClientSettings} for a given {@link RepositoryMetaData} from cached settings or creates them + * by overriding static client settings from {@link #staticClientSettings} with settings found in the repository metadata. 
+ * @param repositoryMetaData Repository Metadata + * @return S3ClientSettings + */ + private S3ClientSettings settings(RepositoryMetaData repositoryMetaData) { + final String clientName = S3Repository.CLIENT_NAME.get(repositoryMetaData.settings()); + final S3ClientSettings staticSettings = staticClientSettings.get(clientName); + if (staticSettings != null) { + { + final S3ClientSettings existing = derivedClientSettings.getOrDefault(staticSettings, emptyMap()).get(repositoryMetaData); + if (existing != null) { + return existing; + } + } + synchronized (this) { + final Map derivedSettings = + derivedClientSettings.getOrDefault(staticSettings, emptyMap()); + final S3ClientSettings existing = derivedSettings.get(repositoryMetaData); + if (existing != null) { + return existing; + } + final S3ClientSettings newSettings = staticSettings.refine(repositoryMetaData); + derivedClientSettings = MapBuilder.newMapBuilder(derivedClientSettings).put( + staticSettings, MapBuilder.newMapBuilder(derivedSettings).put(repositoryMetaData, newSettings).immutableMap() + ).immutableMap(); + return newSettings; + } + } + throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. 
Existing client configs: " + + Strings.collectionToDelimitedString(staticClientSettings.keySet(), ",")); + } + // proxy for testing AmazonS3 buildClient(final S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); @@ -141,17 +183,17 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings) { // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { - final AWSCredentials credentials = clientSettings.credentials; + final S3BasicCredentials credentials = clientSettings.credentials; if (credentials == null) { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); } else { logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(credentials); + return new AWSStaticCredentialsProvider(credentials); } } - protected synchronized void releaseCachedClients() { + private synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 0c14f44d8b613..2f5982b77ce62 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -22,7 +22,7 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.internal.StaticCredentialsProvider; +import com.amazonaws.auth.AWSStaticCredentialsProvider; import org.elasticsearch.common.settings.MockSecureSettings; import 
org.elasticsearch.common.settings.Settings; @@ -61,7 +61,7 @@ public void testAWSCredentialsFromKeystore() { final String clientName = clientNamePrefix + i; final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); - assertThat(credentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(credentialsProvider, instanceOf(AWSStaticCredentialsProvider.class)); assertThat(credentialsProvider.getCredentials().getAWSAccessKeyId(), is(clientName + "_aws_access_key")); assertThat(credentialsProvider.getCredentials().getAWSSecretKey(), is(clientName + "_aws_secret_key")); } @@ -83,7 +83,7 @@ public void testSetDefaultCredential() { // test default exists and is an Instance provider final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); - assertThat(defaultCredentialsProvider, instanceOf(StaticCredentialsProvider.class)); + assertThat(defaultCredentialsProvider, instanceOf(AWSStaticCredentialsProvider.class)); assertThat(defaultCredentialsProvider.getCredentials().getAWSAccessKeyId(), is(awsAccessKey)); assertThat(defaultCredentialsProvider.getCredentials().getAWSSecretKey(), is(awsSecretKey)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index b4c2f81a3f8b8..739452dc178c4 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -59,7 +59,6 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa private static final 
ConcurrentMap blobs = new ConcurrentHashMap<>(); private static String bucket; - private static String client; private static ByteSizeValue bufferSize; private static boolean serverSideEncryption; private static String cannedACL; @@ -68,7 +67,6 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa @BeforeClass public static void setUpRepositorySettings() { bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - client = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); bufferSize = new ByteSizeValue(randomIntBetween(5, 50), ByteSizeUnit.MB); serverSideEncryption = randomBoolean(); if (randomBoolean()) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index a44ad706b2361..6640a0dc4d71c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -22,8 +22,10 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.StorageClass; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreTestCase; @@ -114,15 +116,14 @@ public static S3BlobStore randomMockS3BlobStore() { storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString(); } - final String theClientName = randomAlphaOfLength(4); final AmazonS3 client = new MockAmazonS3(new 
ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); final S3Service service = new S3Service() { @Override - public synchronized AmazonS3Reference client(String clientName) { - assert theClientName.equals(clientName); + public synchronized AmazonS3Reference client(RepositoryMetaData repositoryMetaData) { return new AmazonS3Reference(client); } }; - return new S3BlobStore(service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass, + new RepositoryMetaData(bucket, "s3", Settings.EMPTY)); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index e629f43f8a3d3..53740672df329 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -21,8 +21,7 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.Protocol; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.BasicSessionCredentials; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; @@ -103,7 +102,7 @@ public void testCredentialsTypeWithAccessKeyAndSecretKey() { secureSettings.setString("s3.client.default.secret_key", "secret_key"); final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); final S3ClientSettings defaultSettings = settings.get("default"); - BasicAWSCredentials credentials = (BasicAWSCredentials) defaultSettings.credentials; + S3BasicCredentials credentials = defaultSettings.credentials; 
assertThat(credentials.getAWSAccessKeyId(), is("access_key")); assertThat(credentials.getAWSSecretKey(), is("secret_key")); } @@ -115,9 +114,34 @@ public void testCredentialsTypeWithAccessKeyAndSecretKeyAndSessionToken() { secureSettings.setString("s3.client.default.session_token", "session_token"); final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); final S3ClientSettings defaultSettings = settings.get("default"); - BasicSessionCredentials credentials = (BasicSessionCredentials) defaultSettings.credentials; + S3BasicSessionCredentials credentials = (S3BasicSessionCredentials) defaultSettings.credentials; assertThat(credentials.getAWSAccessKeyId(), is("access_key")); assertThat(credentials.getAWSSecretKey(), is("secret_key")); assertThat(credentials.getSessionToken(), is("session_token")); } + + public void testRefineWithRepoSettings() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", "access_key"); + secureSettings.setString("s3.client.default.secret_key", "secret_key"); + secureSettings.setString("s3.client.default.session_token", "session_token"); + final S3ClientSettings baseSettings = S3ClientSettings.load( + Settings.builder().setSecureSettings(secureSettings).build()).get("default"); + + { + final S3ClientSettings refinedSettings = baseSettings.refine(new RepositoryMetaData("name", "type", Settings.EMPTY)); + assertTrue(refinedSettings == baseSettings); + } + + { + final String endpoint = "some.host"; + final S3ClientSettings refinedSettings = baseSettings.refine(new RepositoryMetaData("name", "type", + Settings.builder().put("endpoint", endpoint).build())); + assertThat(refinedSettings.endpoint, is(endpoint)); + S3BasicSessionCredentials credentials = (S3BasicSessionCredentials) refinedSettings.credentials; + assertThat(credentials.getAWSAccessKeyId(), is("access_key")); + assertThat(credentials.getAWSSecretKey(), 
is("secret_key")); + assertThat(credentials.getSessionToken(), is("session_token")); + } + } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 60ff802c4fc02..36fa8b684bbb9 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; -import java.util.Collections; import java.util.Map; import static org.hamcrest.Matchers.containsString; @@ -49,13 +48,12 @@ public void shutdown() { private static class DummyS3Service extends S3Service { @Override - public AmazonS3Reference client(String clientName) { + public AmazonS3Reference client(RepositoryMetaData repositoryMetaData) { return new AmazonS3Reference(new DummyS3Client()); } @Override - public Map refreshAndClearCache(Map clientsSettings) { - return Collections.emptyMap(); + public void refreshAndClearCache(Map clientsSettings) { } @Override diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 1c64b66be3ad2..97bb36163b11d 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -29,6 +29,86 @@ setup: snapshot: snapshot-two ignore: 404 +--- +"Try to create repository with broken endpoint override and named client": + + # Register repository with broken endpoint setting + - do: + catch: /repository_verification_exception/ + snapshot.create_repository: 
+ repository: repository_broken + body: + type: s3 + settings: + bucket: ${permanent_bucket} + client: integration_test_permanent + base_path: "${permanent_base_path}" + endpoint: 127.0.0.1:5 + canned_acl: private + storage_class: standard + + # Turn off verification to be able to create the repo with broken endpoint setting + - do: + snapshot.create_repository: + verify: false + repository: repository_broken + body: + type: s3 + settings: + bucket: ${permanent_bucket} + client: integration_test_permanent + base_path: "${permanent_base_path}" + endpoint: 127.0.0.1:5 + canned_acl: private + storage_class: standard + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Creating snapshot with broken repo should fail + - do: + catch: /repository_exception/ + snapshot.create: + repository: repository_broken + snapshot: snapshot-one + wait_for_completion: true + + # Creating snapshot with existing working repository should work + - do: + snapshot.create: + repository: repository_permanent + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state: SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed: 0 } + --- "Snapshot and Restore with repository-s3 using permanent credentials": diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 43bb7401dba58..1b2503ccb99d5 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ 
b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -35,6 +35,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestGetAction; import org.elasticsearch.rest.action.document.RestUpdateAction; @@ -921,6 +923,7 @@ public void testSnapshotRestore() throws IOException { // We therefore use the deprecated typed APIs when running against the current version. if (isRunningAgainstOldCluster() == false) { createTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); + createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); } client().performRequest(createTemplateRequest); @@ -1122,6 +1125,7 @@ private void checkSnapshot(String snapshotName, int count, Version tookOnVersion // We therefore use the deprecated typed APIs when running against the current version. 
if (isRunningAgainstOldCluster() == false) { getTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); + getTemplateRequest.setOptions(expectWarnings(RestGetIndexTemplateAction.TYPES_DEPRECATION_MESSAGE)); } Map getTemplateResponse = entityAsMap(client().performRequest(getTemplateRequest)); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 10bdcc234c656..f22b1b44c0763 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -201,7 +201,7 @@ public void testQueryBuilderBWC() throws Exception { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; Request request = new Request("GET", "/" + index + "/_search"); request.setJsonEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + - "\"docvalue_fields\": [{\"field\":\"query.query_builder_field\", \"format\":\"use_field_mapping\"}]}"); + "\"docvalue_fields\": [{\"field\":\"query.query_builder_field\"}]}"); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map)toMap(rsp).get("hits")).get("hits")).get(0); diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index f50a3fd9ea42d..2605836f8573c 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -67,3 +67,9 @@ field3: value - match: { hits.total: 1 } - match: { hits.hits.0._id: q3 } + +--- +"Index with _all is available": + - do: + indices.get: + index: all-index diff --git 
a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index ffa9e0ce2a6f8..9e06f767d4892 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -203,3 +203,21 @@ tasks.get: wait_for_completion: true task_id: $task + +--- +"Create an index with _all explicitly disabled": + - skip: + features: warnings + - do: + warnings: + - "[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement, you can use [copy_to] on mapping fields to create your own catch all field." + indices.create: + index: all-index + body: + mappings: + type: + _all: + enabled: false + properties: + field: + type: text diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 39c72dfd5334b..508a898e0cdb5 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -125,3 +125,17 @@ wait_for_completion: true task_id: $task_id - match: { task.headers.X-Opaque-Id: "Reindexing Again" } + +--- +"Index with _all is available": + - do: + indices.get: + index: all-index + + - do: + indices.get_mapping: + index: all-index + + - is_true: all-index.mappings._all + - match: { all-index.mappings._all.enabled: false} + diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java index 05b3603628adb..6261cd62a2b12 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java +++ 
b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java @@ -285,18 +285,20 @@ public void test90SecurityCliPackaging() { final Installation.Executables bin = installation.executables(); final Shell sh = new Shell(); - if (distribution().equals(Distribution.DEFAULT_TAR) || distribution().equals(Distribution.DEFAULT_ZIP)) { + if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { assertTrue(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); - Platforms.onLinux(() -> { - final Result result = sh.run(bin.elasticsearchCertutil + " help"); + final Platforms.PlatformAction action = () -> { + Result result = sh.run(bin.elasticsearchCertutil + " --help"); assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); - }); - Platforms.onWindows(() -> { - final Result result = sh.run(bin.elasticsearchCertutil + " help"); - assertThat(result.stdout, containsString("Simplifies certificate creation for use with the Elastic Stack")); - }); - } else if (distribution().equals(Distribution.OSS_TAR) || distribution().equals(Distribution.OSS_ZIP)) { + // Ensure that the exit code from the java command is passed back up through the shell script + result = sh.runIgnoreExitCode(bin.elasticsearchCertutil + " invalid-command"); + assertThat(result.exitCode, is(64)); + assertThat(result.stdout, containsString("Unknown command [invalid-command]")); + }; + Platforms.onLinux(action); + Platforms.onWindows(action); + } else if (distribution().equals(Distribution.OSS_LINUX) || distribution().equals(Distribution.OSS_WINDOWS)) { assertFalse(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); } } @@ -312,7 +314,7 @@ public void test100ElasticsearchShardCliPackaging() { assertThat(result.stdout, containsString("A CLI tool to remove corrupted parts of unrecoverable shards")); }; - if 
(distribution().equals(Distribution.DEFAULT_TAR) || distribution().equals(Distribution.DEFAULT_ZIP)) { + if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { Platforms.onLinux(action); Platforms.onWindows(action); } @@ -330,7 +332,7 @@ public void test110ElasticsearchNodeCliPackaging() { containsString("A CLI tool to unsafely recover a cluster after the permanent loss of too many master-eligible nodes")); }; - if (distribution().equals(Distribution.DEFAULT_TAR) || distribution().equals(Distribution.DEFAULT_ZIP)) { + if (distribution().equals(Distribution.DEFAULT_LINUX) || distribution().equals(Distribution.DEFAULT_WINDOWS)) { Platforms.onLinux(action); Platforms.onWindows(action); } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java index 9b359a329c1bb..b6c633c0e72a2 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultTarTests.java @@ -25,6 +25,6 @@ public class DefaultTarTests extends ArchiveTestCase { @Override protected Distribution distribution() { - return Distribution.DEFAULT_TAR; + return Distribution.DEFAULT_LINUX; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java index 072c3da68868f..6fedd3b89db70 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultWindowsServiceTests.java @@ -25,6 +25,6 @@ public class DefaultWindowsServiceTests extends WindowsServiceTestCase { @Override protected Distribution distribution() { - return Distribution.DEFAULT_ZIP; + return Distribution.DEFAULT_WINDOWS; } } diff --git 
a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java index d9a6353a8c6f3..852535188cf9b 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/DefaultZipTests.java @@ -25,6 +25,6 @@ public class DefaultZipTests extends ArchiveTestCase { @Override protected Distribution distribution() { - return Distribution.DEFAULT_ZIP; + return Distribution.DEFAULT_WINDOWS; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java index 86637fc9d48e3..3e72f1da5cbb0 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssTarTests.java @@ -25,6 +25,6 @@ public class OssTarTests extends ArchiveTestCase { @Override protected Distribution distribution() { - return Distribution.OSS_TAR; + return Distribution.OSS_LINUX; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java index f4de95f67b6db..bfa220c6aaf22 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssWindowsServiceTests.java @@ -25,6 +25,6 @@ public class OssWindowsServiceTests extends WindowsServiceTestCase { @Override protected Distribution distribution() { - return Distribution.OSS_ZIP; + return Distribution.OSS_WINDOWS; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java index b6cd1e596a09b..418ba6d344650 100644 --- 
a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/OssZipTests.java @@ -25,6 +25,6 @@ public class OssZipTests extends ArchiveTestCase { @Override protected Distribution distribution() { - return Distribution.OSS_ZIP; + return Distribution.OSS_WINDOWS; } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java index 4787766ae3b61..36558ea2429cc 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/RpmPreservationTestCase.java @@ -43,10 +43,7 @@ import static org.elasticsearch.packaging.util.Platforms.isRPM; import static org.elasticsearch.packaging.util.Platforms.isSystemd; import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java index db48ed753b271..ed579c35baf56 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Archives.java @@ -70,7 +70,7 @@ public static Installation installArchive(Distribution distribution, Path fullIn final Path baseInstallPath = fullInstallPath.getParent(); final Path extractedPath = baseInstallPath.resolve("elasticsearch-" + version); - assertThat("distribution file must exist", Files.exists(distributionFile), is(true)); + assertThat("distribution file must exist: " + distributionFile.toString(), 
Files.exists(distributionFile), is(true)); assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty()); if (distribution.packaging == Distribution.Packaging.TAR) { diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java index 7e1067fdb351c..5ed1bcb5a3d1f 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Distribution.java @@ -19,28 +19,46 @@ package org.elasticsearch.packaging.util; +import java.util.Locale; + public enum Distribution { - OSS_TAR(Packaging.TAR, Flavor.OSS), - OSS_ZIP(Packaging.ZIP, Flavor.OSS), - OSS_DEB(Packaging.DEB, Flavor.OSS), - OSS_RPM(Packaging.RPM, Flavor.OSS), + OSS_LINUX(Packaging.TAR, Platform.LINUX, Flavor.OSS), + OSS_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.OSS), + OSS_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.OSS), + OSS_DEB(Packaging.DEB, Platform.LINUX, Flavor.OSS), + OSS_RPM(Packaging.RPM, Platform.LINUX, Flavor.OSS), - DEFAULT_TAR(Packaging.TAR, Flavor.DEFAULT), - DEFAULT_ZIP(Packaging.ZIP, Flavor.DEFAULT), - DEFAULT_DEB(Packaging.DEB, Flavor.DEFAULT), - DEFAULT_RPM(Packaging.RPM, Flavor.DEFAULT); + DEFAULT_LINUX(Packaging.TAR, Platform.LINUX, Flavor.DEFAULT), + DEFAULT_WINDOWS(Packaging.ZIP, Platform.WINDOWS, Flavor.DEFAULT), + DEFAULT_DARWIN(Packaging.TAR, Platform.DARWIN, Flavor.DEFAULT), + DEFAULT_DEB(Packaging.DEB, Platform.LINUX, Flavor.DEFAULT), + DEFAULT_RPM(Packaging.RPM, Platform.LINUX, Flavor.DEFAULT); public final Packaging packaging; + public final Platform platform; public final Flavor flavor; - Distribution(Packaging packaging, Flavor flavor) { + Distribution(Packaging packaging, Platform platform, Flavor flavor) { this.packaging = packaging; + this.platform = platform; this.flavor = flavor; } public String filename(String version) { - return 
flavor.name + "-" + version + packaging.extension; + String architecture = ""; + if (version.startsWith("6.") == false) { + + if (packaging == Packaging.DEB) { + architecture = "-amd64"; + } else { + if (packaging != Packaging.RPM) { + architecture = "-" + platform.toString(); + } + architecture += "-x86_64"; + } + } + return flavor.name + "-" + version + architecture + packaging.extension; } public boolean isDefault() { @@ -53,8 +71,8 @@ public boolean isOSS() { public enum Packaging { - TAR(".tar.gz", Platforms.LINUX), - ZIP(".zip", true), + TAR(".tar.gz", Platforms.LINUX || Platforms.DARWIN), + ZIP(".zip", Platforms.WINDOWS), DEB(".deb", Platforms.isDPKG()), RPM(".rpm", Platforms.isRPM()); @@ -70,6 +88,16 @@ public enum Packaging { } } + public enum Platform { + LINUX, + WINDOWS, + DARWIN; + + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + public enum Flavor { OSS("elasticsearch-oss"), diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java index c7ca1284ca69a..dbac9c88d26c9 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Platforms.java @@ -27,6 +27,7 @@ public class Platforms { public static final String OS_NAME = System.getProperty("os.name"); public static final boolean LINUX = OS_NAME.startsWith("Linux"); public static final boolean WINDOWS = OS_NAME.startsWith("Windows"); + public static final boolean DARWIN = OS_NAME.startsWith("Mac OS X"); public static String getOsRelease() { if (LINUX) { diff --git a/qa/vagrant/src/test/resources/packaging/utils/packages.bash b/qa/vagrant/src/test/resources/packaging/utils/packages.bash index d86f1c64e2e69..0285e3682b3f7 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/packages.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/packages.bash @@ -73,10 +73,16 @@ 
install_package() { ;; esac done + local rpm_classifier="-x86_64" + local deb_classifier="-amd64" + if [[ $version == 6* ]]; then + rpm_classifier="" + deb_classifier="" + fi if is_rpm; then - rpm $rpmCommand $PACKAGE_NAME-$version.rpm + rpm $rpmCommand $PACKAGE_NAME-$version$rpm_classifier.rpm elif is_dpkg; then - run dpkg $dpkgCommand -i $PACKAGE_NAME-$version.deb + run dpkg $dpkgCommand -i $PACKAGE_NAME-$version$deb_classifier.deb [[ "$status" -eq 0 ]] || { echo "dpkg failed:" echo "$output" diff --git a/qa/vagrant/src/test/resources/packaging/utils/tar.bash b/qa/vagrant/src/test/resources/packaging/utils/tar.bash index eb2e39274e29e..142215a703682 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/tar.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/tar.bash @@ -40,7 +40,7 @@ install_archive() { echo "Unpacking tarball to $ESHOME" rm -rf /tmp/untar mkdir -p /tmp/untar - tar -xzpf "${PACKAGE_NAME}-${version}.tar.gz" -C /tmp/untar + tar -xzpf "${PACKAGE_NAME}-${version}-linux-x86_64.tar.gz" -C /tmp/untar find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml new file mode 100644 index 0000000000000..d196d5e5dd8c2 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.create/20_mix_typeless_typeful.yml @@ -0,0 +1,137 @@ +--- +"Create a typeless index while there is a typed template": + + - skip: + version: " - 6.99.99" + reason: needs change to be backported to 6.7 + + - do: + indices.put_template: + include_type_name: true + name: test_template + body: + index_patterns: test-* + mappings: + my_type: + properties: + foo: + type: keyword + + - do: + indices.create: + include_type_name: false + index: test-1 + body: + mappings: + properties: + bar: + type: "long" + + - do: + 
indices.get_mapping: + include_type_name: true + index: test-1 + + - is_true: test-1.mappings._doc # the index creation call won + - is_false: test-1.mappings.my_type + - is_true: test-1.mappings._doc.properties.foo + - is_true: test-1.mappings._doc.properties.bar + +--- +"Create a typed index while there is a typeless template": + + - skip: + version: " - 6.99.99" + reason: needs change to be backported to 6.7 + + - do: + indices.put_template: + include_type_name: false + name: test_template + body: + index_patterns: test-* + mappings: + properties: + foo: + type: keyword + + - do: + indices.create: + include_type_name: true + index: test-1 + body: + mappings: + my_type: + properties: + bar: + type: "long" + + - do: + indices.get_mapping: + include_type_name: true + index: test-1 + + - is_true: test-1.mappings.my_type # the index creation call won + - is_false: test-1.mappings._doc + - is_true: test-1.mappings.my_type.properties.foo + - is_true: test-1.mappings.my_type.properties.bar + +--- +"Implicitly create a typed index while there is a typeless template": + + - skip: + version: " - 6.99.99" + reason: needs change to be backported to 6.7 + + - do: + indices.put_template: + include_type_name: false + name: test_template + body: + index_patterns: test-* + mappings: + properties: + foo: + type: keyword + + - do: + catch: /the final mapping would have more than 1 type/ + index: + index: test-1 + type: my_type + body: { bar: 42 } + +--- +"Implicitly create a typeless index while there is a typed template": + + - skip: + version: " - 6.99.99" + reason: needs typeless index operations to work on typed indices + + - do: + indices.put_template: + include_type_name: true + name: test_template + body: + index_patterns: test-* + mappings: + my_type: + properties: + foo: + type: keyword + + - do: + index: + index: test-1 + type: my_type + body: { bar: 42 } + + - do: + indices.get_mapping: + include_type_name: true + index: test-1 + + - is_true: test-1.mappings.my_type # 
the template is honored + - is_false: test-1.mappings._doc + - is_true: test-1.mappings.my_type.properties.foo + - is_true: test-1.mappings.my_type.properties.bar diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 6338598de05d0..f3dc3ac86bc9f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -59,7 +59,7 @@ setup: - do: indices.get_alias: - name: _all + name: '*' - match: {test_index.aliases.test_alias: {}} - match: {test_index.aliases.test_blias: {}} @@ -220,7 +220,7 @@ setup: - is_false: test_index_2.aliases.test_blias --- -"Get aliases via /pref*/_alias/{name}": +"Get aliases via /*suf/_alias/{name}": - do: indices.get_alias: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml new file mode 100644 index 0000000000000..08b3009be0e88 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.get_alias/30_wildcards.yml @@ -0,0 +1,140 @@ +--- +setup: + + - do: + indices.create: + index: test_index + body: + aliases: + test_alias_1: {} + test_alias_2: {} + test_blias_1: {} + test_blias_2: {} + test: {} + +--- +"Get aliases wildcard and inclusion": + - do: + indices.get_alias: + name: test_alias*,test_blias_1 + + - match: {test_index.aliases.test_alias_1: {}} + - match: {test_index.aliases.test_alias_2: {}} + - match: {test_index.aliases.test_blias_1: {}} + - is_false: test_index.aliases.test_blias_2 + - is_false: test_index.aliases.test + +--- +"Get aliases wildcard and simple exclusion": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + indices.get_alias: + name: 
test_blias_2,test_alias*,-test_alias_1 + + - is_false: test_index.aliases.test_alias_1 + - match: {test_index.aliases.test_alias_2: {}} + - is_false: test_index.aliases.test_blias_1 + - match: {test_index.aliases.test_blias_2: {}} + - is_false: test_index.aliases.test + +--- +"Get aliases and wildcard exclusion": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + indices.get_alias: + name: test_alias_1,test_blias_1,-test_alias* + + - is_false: test_index.aliases.test_alias_1 + - is_false: test_index.aliases.test_alias_2 + - match: {test_index.aliases.test_blias_1: {}} + - is_false: test_index.aliases.test_blias_2 + - is_false: test_index.aliases.test + + - do: + indices.get_alias: + name: test_blias_2,tes*,-test_alias* + + - is_false: test_index.aliases.test_alias_1 + - is_false: test_index.aliases.test_alias_2 + - match: {test_index.aliases.test_blias_1: {}} + - match: {test_index.aliases.test_blias_2: {}} + - match: {test_index.aliases.test: {}} + +--- +"Non-existent exclusion alias before wildcard returns 404": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + catch: missing + indices.get_alias: + name: -test_alias_1,test_alias*,-test_alias_2 + + - match: { 'status': 404} + - match: { 'error': 'alias [-test_alias_1] missing' } + - match: {test_index.aliases.test_alias_1: {}} + - is_false: test_index.aliases.test_alias_2 + - is_false: test_index.aliases.test_blias_1 + - is_false: test_index.aliases.test_blias_2 + - is_false: test_index.aliases.test + + - do: + catch: missing + indices.get_alias: + name: -test_alias_1,-non-existing,test_alias*,-test + + - match: { 'status': 404} + - match: { 'error': 'aliases [-non-existing,-test_alias_1] missing' } + - match: {test_index.aliases.test_alias_1: {}} + - match: {test_index.aliases.test_alias_2: {}} + - is_false: test_index.aliases.test_blias_1 + - is_false: test_index.aliases.test_blias_2 + - is_false: 
test_index.aliases.test + +--- +"Missing exclusions does not fire 404": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + indices.get_alias: + name: test_alias*,-non-existent,test_blias*,-test + + - match: {test_index.aliases.test_alias_1: {}} + - match: {test_index.aliases.test_alias_2: {}} + - match: {test_index.aliases.test_blias_1: {}} + - match: {test_index.aliases.test_blias_2: {}} + - is_false: test_index.aliases.test + +--- +"Exclusion of non wildcarded aliases": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + indices.get_alias: + name: test_alias_1,test_blias_2,-test_alias*,-test_blias_2 + + - match: { '': {}} + +--- +"Wildcard exclusions does not trigger 404": + - skip: + version: " - 6.99.99" + reason: Exclusions in the alias expression are not handled + - do: + catch: missing + indices.get_alias: + name: -non-existent,-non-existent*,-another + + - match: { 'status': 404} + - match: { 'error': 'alias [-non-existent] missing' } + - is_false: test_index.aliases.test_alias_1 + - is_false: test_index.aliases.test_alias_2 + - is_false: test_index.aliases.test_blias_1 + - is_false: test_index.aliases.test_blias_2 + - is_false: test_index.aliases.test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml index c5bfca5c5b1d7..e0d61192bf754 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/10_basic.yml @@ -46,8 +46,8 @@ setup: "Nested doc version and seqIDs": - skip: - version: " - 6.3.99" - reason: "object notation for docvalue_fields was introduced in 6.4" + version: " - 6.99.99" + reason: "Triggers warnings before 7.0" - do: index: @@ -62,7 +62,7 @@ setup: - do: search: rest_total_hits_as_int: true - body: 
{ "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ "_seq_no" ]} }}, "version": true, "docvalue_fields" : [ "_seq_no" ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } @@ -86,7 +86,7 @@ setup: - do: search: rest_total_hits_as_int: true - body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ { "field": "_seq_no", "format": "use_field_mapping" } ]} }}, "version": true, "docvalue_fields" : [ { "field": "_seq_no", "format": "use_field_mapping" } ] } + body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": [ "_seq_no" ]} }}, "version": true, "docvalue_fields" : [ "_seq_no" ] } - match: { hits.total: 1 } - match: { hits.hits.0._index: "test" } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml index e5277c0edcbbb..18191c5ee3ac2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yml @@ -144,12 +144,9 @@ setup: --- "docvalue_fields": - skip: - version: " - 6.4.0" - reason: format option was added in 6.4 and the deprecation message changed in 6.4.1 - features: warnings + version: " - 6.9.99" + reason: Triggers a deprecation warning before 7.0 - do: - warnings: - - 'There are doc-value fields which are not using a format. 
The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: body: docvalue_fields: [ "count" ] @@ -158,12 +155,9 @@ setup: --- "multiple docvalue_fields": - skip: - version: " - 6.4.0" - reason: format option was added in 6.4 and the deprecation message changed in 6.4.1 - features: warnings + version: " - 6.99.99" + reason: Triggered a deprecation warning before 7.0 - do: - warnings: - - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count, include.field1.keyword]' search: body: docvalue_fields: [ "count", "include.field1.keyword" ] @@ -172,12 +166,9 @@ setup: --- "docvalue_fields as url param": - skip: - version: " - 6.4.0" - reason: format option was added in 6.4 and the deprecation message changed in 6.4.1 - features: warnings + version: " - 6.99.99" + reason: Triggered a deprecation warning before 7.0 - do: - warnings: - - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. 
It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]' search: docvalue_fields: [ "count" ] - match: { hits.hits.0.fields.count: [1] } @@ -185,9 +176,12 @@ setup: --- "docvalue_fields with default format": - skip: - version: " - 6.3.99" - reason: format option was added in 6.4 + version: " - 6.99.99" + reason: Only triggers warnings on 7.0+ + features: warnings - do: + warnings: + - "[use_field_mapping] is a special format that was only used to ease the transition to 7.x. It has become the default and shouldn't be set explicitly anymore." search: body: docvalue_fields: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index 18a004077aeae..07a871d37e65f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -67,8 +67,8 @@ setup: "Docvalues_fields size limit": - skip: - version: " - 6.3.99" - reason: "The object notation for docvalue_fields is only supported on 6.4+" + version: " - 6.99.99" + reason: "Triggers warnings before 7.0" - do: catch: /Trying to retrieve too many docvalue_fields\. Must be less than or equal to[:] \[2\] but was \[3\]\. 
This limit can be set by changing the \[index.max_docvalue_fields_search\] index level setting\./ search: @@ -78,12 +78,9 @@ setup: query: match_all: {} docvalue_fields: - - field: "one" - format: "use_field_mapping" - - field: "two" - format: "use_field_mapping" - - field: "three" - format: "use_field_mapping" + - "one" + - "two" + - "three" --- "Script_fields size limit": diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b66630344a7ea..e520d714bb931 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -116,10 +116,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_5_3 = new Version(V_6_5_3_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_5_4_ID = 6050499; public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); - public static final int V_6_5_5_ID = 6050599; - public static final Version V_6_5_5 = new Version(V_6_5_5_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final int V_6_6_1_ID = 6060199; + public static final Version V_6_6_1 = new Version(V_6_6_1_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; @@ -142,10 +142,10 @@ public static Version fromId(int id) { return V_7_0_0; case V_6_7_0_ID: return V_6_7_0; + case V_6_6_1_ID: + return V_6_6_1; case V_6_6_0_ID: return V_6_6_0; - case V_6_5_5_ID: - return V_6_5_5; case V_6_5_4_ID: return V_6_5_4; case V_6_5_3_ID: diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java new file mode 100644 index 0000000000000..a74aad3ddb586 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreClusterStateListener.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.snapshots.restore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; + +import static org.elasticsearch.snapshots.RestoreService.restoreInProgress; + +public class RestoreClusterStateListener implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(RestoreClusterStateListener.class); + + private final ClusterService clusterService; + private final String uuid; + private final ActionListener listener; + + + private RestoreClusterStateListener(ClusterService clusterService, RestoreService.RestoreCompletionResponse response, + ActionListener listener) { + this.clusterService = clusterService; + this.uuid = response.getUuid(); + this.listener = listener; + } + + @Override + public void clusterChanged(ClusterChangedEvent changedEvent) { + final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); + final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); + if (prevEntry == null) { + // When there is a master failure after a restore has been started, this listener might not be registered + // on the current master and as such it might miss some intermediary cluster states due to batching. + // Clean up listener in that case and acknowledge completion of restore operation to client. 
+ clusterService.removeListener(this); + listener.onResponse(new RestoreSnapshotResponse(null)); + } else if (newEntry == null) { + clusterService.removeListener(this); + ImmutableOpenMap shards = prevEntry.shards(); + assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); + assert RestoreService.completed(shards) : "expected all restore entries to be completed"; + RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), + prevEntry.indices(), + shards.size(), + shards.size() - RestoreService.failedShards(shards)); + RestoreSnapshotResponse response = new RestoreSnapshotResponse(ri); + logger.debug("restore of [{}] completed", prevEntry.snapshot().getSnapshotId()); + listener.onResponse(response); + } else { + // restore not completed yet, wait for next cluster state update + } + } + + /** + * Creates a cluster state listener and registers it with the cluster service. The listener passed as a + * parameter will be called when the restore is complete. 
+ */ + public static void createAndRegisterListener(ClusterService clusterService, RestoreService.RestoreCompletionResponse response, + ActionListener listener) { + clusterService.addListener(new RestoreClusterStateListener(clusterService, response, listener)); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index b362be49b10ab..d8dcc5eb8f846 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -22,26 +22,17 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.RestoreService.RestoreCompletionResponse; -import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import static 
org.elasticsearch.snapshots.RestoreService.restoreInProgress; - /** * Transport action for restore snapshot operation */ @@ -86,39 +77,7 @@ protected void masterOperation(final RestoreSnapshotRequest request, final Clust @Override public void onResponse(RestoreCompletionResponse restoreCompletionResponse) { if (restoreCompletionResponse.getRestoreInfo() == null && request.waitForCompletion()) { - final Snapshot snapshot = restoreCompletionResponse.getSnapshot(); - String uuid = restoreCompletionResponse.getUuid(); - - ClusterStateListener clusterStateListener = new ClusterStateListener() { - @Override - public void clusterChanged(ClusterChangedEvent changedEvent) { - final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid); - final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid); - if (prevEntry == null) { - // When there is a master failure after a restore has been started, this listener might not be registered - // on the current master and as such it might miss some intermediary cluster states due to batching. - // Clean up listener in that case and acknowledge completion of restore operation to client. 
- clusterService.removeListener(this); - listener.onResponse(new RestoreSnapshotResponse(null)); - } else if (newEntry == null) { - clusterService.removeListener(this); - ImmutableOpenMap shards = prevEntry.shards(); - assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state(); - assert RestoreService.completed(shards) : "expected all restore entries to be completed"; - RestoreInfo ri = new RestoreInfo(prevEntry.snapshot().getSnapshotId().getName(), - prevEntry.indices(), - shards.size(), - shards.size() - RestoreService.failedShards(shards)); - RestoreSnapshotResponse response = new RestoreSnapshotResponse(ri); - logger.debug("restore of [{}] completed", snapshot); - listener.onResponse(response); - } else { - // restore not completed yet, wait for next cluster state update - } - } - }; - - clusterService.addListener(clusterStateListener); + RestoreClusterStateListener.createAndRegisterListener(clusterService, restoreCompletionResponse, listener); } else { listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo())); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 67b3fe048bea1..c7415391675fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.single.shard.TransportSingleShardAction; @@ -91,7 +92,8 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Predicate metadataFieldPredicate = indicesService::isMetaDataField; + Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetaDataField(indexCreatedVersion, f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper mapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 9749aaa05b1a4..e5cd947f0f046 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -43,7 +43,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont indexTemplates = new ArrayList<>(); } - GetIndexTemplatesResponse(List indexTemplates) { + public GetIndexTemplatesResponse(List indexTemplates) { this.indexTemplates = indexTemplates; } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index c46933578f163..01c21544047ed 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -83,7 +83,8 @@ protected FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesI for (String field : fieldNames) { MappedFieldType ft = mapperService.fullName(field); if (ft != null) { - if (indicesService.isMetaDataField(field) || fieldPredicate.test(ft.name())) { + if (indicesService.isMetaDataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) + || fieldPredicate.test(ft.name())) { FieldCapabilities fieldCap = new FieldCapabilities(field, ft.typeName(), ft.isSearchable(), ft.isAggregatable()); responseMap.put(field, fieldCap); } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 10a85b723166c..afc81b21da4d5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -129,7 +129,8 @@ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder optio options.getSorts().forEach(groupSource::sort); } if (options.getFetchSourceContext() != null) { - if (options.getFetchSourceContext().includes() == null && options.getFetchSourceContext().excludes() == null) { + if (options.getFetchSourceContext().includes().length == 0 && + options.getFetchSourceContext().excludes().length == 0) { groupSource.fetchSource(options.getFetchSourceContext().fetchSource()); } else { groupSource.fetchSource(options.getFetchSourceContext().includes(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index d9120342cf4cd..838b1e2547204 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -318,6 +318,28 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (mappings.containsKey(cursor.key)) { XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(xContentRegistry, mappingString)); + } else if (mappings.size() == 1 && cursor.key.equals(MapperService.SINGLE_MAPPING_NAME)) { + // Typeless template with typed mapping + Map templateMapping = MapperService.parseMapping(xContentRegistry, mappingString); + assert templateMapping.size() == 1 : templateMapping; + assert cursor.key.equals(templateMapping.keySet().iterator().next()) : + cursor.key + " != " + templateMapping; + Map.Entry> mappingEntry = mappings.entrySet().iterator().next(); + templateMapping = Collections.singletonMap( + mappingEntry.getKey(), // reuse type name from the mapping + templateMapping.values().iterator().next()); // but actual mappings from the template + XContentHelper.mergeDefaults(mappingEntry.getValue(), templateMapping); + } else if (template.mappings().size() == 1 && mappings.containsKey(MapperService.SINGLE_MAPPING_NAME)) { + // Typed template with typeless mapping + Map templateMapping = MapperService.parseMapping(xContentRegistry, mappingString); + assert templateMapping.size() == 1 : templateMapping; + assert cursor.key.equals(templateMapping.keySet().iterator().next()) : + cursor.key + " != " + templateMapping; + Map mapping = mappings.get(MapperService.SINGLE_MAPPING_NAME); + templateMapping = Collections.singletonMap( + MapperService.SINGLE_MAPPING_NAME, // make template mapping typeless + templateMapping.values().iterator().next()); + XContentHelper.mergeDefaults(mapping, templateMapping); } else { mappings.put(cursor.key, MapperService.parseMapping(xContentRegistry, mappingString)); diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java 
b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index 2e3c2953ec375..0b92955583f7f 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -108,9 +108,6 @@ public class DateFormatters { .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) .optionalStart() - .appendFraction(NANO_OF_SECOND, 3, 3, true) - .optionalEnd() - .optionalStart() .appendFraction(NANO_OF_SECOND, 3, 9, true) .optionalEnd() .optionalEnd() @@ -205,7 +202,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter BASIC_TIME_PRINTER = new DateTimeFormatterBuilder() @@ -311,7 +308,7 @@ public class DateFormatters { private static final DateFormatter BASIC_ORDINAL_DATE_TIME = new JavaDateFormatter("basic_ordinal_date_time", new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_PRINTER) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_PRINTER) + new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_FORMATTER) .appendZoneOrOffsetId().toFormatter(Locale.ROOT), new DateTimeFormatterBuilder().appendPattern("yyyyDDD").append(BASIC_T_TIME_FORMATTER) .append(TIME_ZONE_FORMATTER_NO_COLON).toFormatter(Locale.ROOT) @@ -419,7 +416,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 9, true) 
.appendZoneOrOffsetId() .toFormatter(Locale.ROOT), new DateTimeFormatterBuilder() @@ -428,7 +425,7 @@ public class DateFormatters { .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 9, true) .append(TIME_ZONE_FORMATTER_NO_COLON) .toFormatter(Locale.ROOT) ); @@ -485,7 +482,7 @@ public class DateFormatters { .appendLiteral('T') .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) .optionalStart() - .appendFraction(NANO_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 9, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -542,7 +539,7 @@ public class DateFormatters { // NOTE: this is not a strict formatter to retain the joda time based behaviour, even though it's named like this private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER = new DateTimeFormatterBuilder() @@ -582,8 +579,8 @@ public class DateFormatters { .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 3, true) + // this one here is lenient as well to retain joda time based bwc compatibility + .appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT) ); @@ -599,7 +596,7 @@ public class DateFormatters { .appendLiteral("T") .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) // this one here is lenient as well to retain joda time based bwc compatibility - .appendFraction(NANO_OF_SECOND, 1, 3, true) + 
.appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT) ); @@ -625,7 +622,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 3, 3, true) + .appendFraction(NANO_OF_SECOND, 3, 9, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -649,7 +646,7 @@ public class DateFormatters { .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_TIME_PRINTER = new DateTimeFormatterBuilder() @@ -880,7 +877,7 @@ public class DateFormatters { .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) .optionalEnd() .optionalStart() - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .optionalStart().appendZoneOrOffsetId().optionalEnd() .optionalStart().appendOffset("+HHmm", "Z").optionalEnd() @@ -904,6 +901,15 @@ public class DateFormatters { .appendFraction(NANO_OF_SECOND, 1, 3, true) .toFormatter(Locale.ROOT); + private static final DateTimeFormatter HOUR_MINUTE_SECOND_FRACTION_FORMATTER = new DateTimeFormatterBuilder() + .appendValue(HOUR_OF_DAY, 1, 2, SignStyle.NOT_NEGATIVE) + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 1, 2, SignStyle.NOT_NEGATIVE) + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) + .appendFraction(NANO_OF_SECOND, 1, 9, true) + .toFormatter(Locale.ROOT); + private static final DateTimeFormatter ORDINAL_DATE_FORMATTER = new DateTimeFormatterBuilder() .appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD) .appendLiteral('-') @@ -936,7 +942,7 @@ public class DateFormatters { private static final DateTimeFormatter TIME_PREFIX = new DateTimeFormatterBuilder() .append(TIME_NO_MILLIS_FORMATTER) 
- .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .toFormatter(Locale.ROOT); private static final DateTimeFormatter WEEK_DATE_FORMATTER = new DateTimeFormatterBuilder() @@ -974,8 +980,7 @@ public class DateFormatters { /* * Returns a formatter that combines a full date, two digit hour of day, * two digit minute of hour, two digit second of minute, and three digit - * fraction of second (yyyy-MM-dd'T'HH:mm:ss.SSS). Parsing will parse up - * to 3 fractional second digits. + * fraction of second (yyyy-MM-dd'T'HH:mm:ss.SSS). */ private static final DateFormatter DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter("date_hour_minute_second_millis", @@ -990,7 +995,8 @@ public class DateFormatters { .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) .toFormatter(Locale.ROOT)); - private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("date_hour_minute_second_fraction", + private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = + new JavaDateFormatter("date_hour_minute_second_fraction", new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral("T") @@ -999,7 +1005,7 @@ public class DateFormatters { new DateTimeFormatterBuilder() .append(DATE_FORMATTER) .appendLiteral("T") - .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) + .append(HOUR_MINUTE_SECOND_FRACTION_FORMATTER) .toFormatter(Locale.ROOT)); /* @@ -1034,7 +1040,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .toFormatter(Locale.ROOT); @@ -1106,7 +1112,7 @@ public class DateFormatters { STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); private static final DateFormatter HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("hour_minute_second_fraction", - 
STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); + STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_FRACTION_FORMATTER); /* * Returns a formatter for a two digit hour of day and two digit minute of @@ -1142,7 +1148,7 @@ public class DateFormatters { .optionalStart() .appendLiteral(':') .appendValue(SECOND_OF_MINUTE, 1, 2, SignStyle.NOT_NEGATIVE) - .appendFraction(NANO_OF_SECOND, 1, 3, true) + .appendFraction(NANO_OF_SECOND, 1, 9, true) .optionalEnd() .toFormatter(Locale.ROOT); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java new file mode 100644 index 0000000000000..1427e67c5389b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/AllFieldMapper.java @@ -0,0 +1,175 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * Noop mapper that ensures that mappings created in 6x that explicitly disable the _all field + * can be restored in this version. + * + * TODO: Remove in 8 + */ +public class AllFieldMapper extends MetadataFieldMapper { + public static final String NAME = "_all"; + public static final String CONTENT_TYPE = "_all"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new AllFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setName(NAME); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends MetadataFieldMapper.Builder { + private boolean disableExplicit = false; + + public Builder(MappedFieldType existing) { + super(NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); + builder = this; + } + + private Builder setDisableExplicit() { + this.disableExplicit = true; + return this; + } + + @Override + public AllFieldMapper build(BuilderContext context) { + return new AllFieldMapper(fieldType, context.indexSettings(), disableExplicit); + } + } + + public static class TypeParser implements MetadataFieldMapper.TypeParser { + @Override + public MetadataFieldMapper.Builder parse(String name, Map node, + ParserContext parserContext) throws MapperParsingException { + Builder builder = new Builder(parserContext.mapperService().fullName(NAME)); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + Map.Entry entry = iterator.next(); + String fieldName = entry.getKey(); + if (fieldName.equals("enabled")) { + boolean enabled = XContentMapValues.nodeBooleanValue(entry.getValue(), "enabled"); + if (enabled) { + throw new IllegalArgumentException("[_all] is disabled in this version."); + } + builder.setDisableExplicit(); + iterator.remove(); + } + } + return builder; + } + + @Override + public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) { + final Settings indexSettings = context.mapperService().getIndexSettings().getSettings(); + return new AllFieldMapper(indexSettings, Defaults.FIELD_TYPE, false); + } + } + + static final class AllFieldType extends StringFieldType { + AllFieldType() { + } + + protected AllFieldType(AllFieldType ref) { + super(ref); + } + + @Override + public MappedFieldType clone() { + return new AllFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + return new MatchNoDocsQuery(); + } + } + + private final boolean disableExplicit; + + private AllFieldMapper(Settings indexSettings, MappedFieldType existing, boolean disableExplicit) { + this(existing.clone(), indexSettings, disableExplicit); + } + + private 
AllFieldMapper(MappedFieldType fieldType, Settings indexSettings, boolean disableExplicit) { + super(NAME, fieldType, Defaults.FIELD_TYPE, indexSettings); + this.disableExplicit = disableExplicit; + } + + @Override + public void preParse(ParseContext context) throws IOException { + } + + @Override + public void postParse(ParseContext context) throws IOException { + super.parse(context); + } + + @Override + public void parse(ParseContext context) throws IOException { + // we parse in post parse + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + // noop mapper + return; + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + boolean includeDefaults = params.paramAsBoolean("include_defaults", false); + if (includeDefaults || disableExplicit) { + builder.startObject(CONTENT_TYPE); + if (disableExplicit) { + builder.field("enabled", false); + } + builder.endObject(); + } + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 058cf68e8e1f4..044e65c7ec6fb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -73,7 +74,9 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { final String type = rootObjectMapper.name(); final DocumentMapper existingMapper = 
mapperService.documentMapper(type); - final Map metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(); + final Version indexCreatedVersion = mapperService.getIndexSettings().getIndexVersionCreated(); + final Map metadataMapperParsers = + mapperService.mapperRegistry.getMetadataMapperParsers(indexCreatedVersion); for (Map.Entry entry : metadataMapperParsers.entrySet()) { final String name = entry.getKey(); final MetadataFieldMapper existingMetadataMapper = existingMapper == null diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java index e388dd7ebcd00..db7954e9bd76a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentMapperParser.java @@ -65,8 +65,8 @@ public DocumentMapperParser(IndexSettings indexSettings, MapperService mapperSer this.similarityService = similarityService; this.queryShardContextSupplier = queryShardContextSupplier; this.typeParsers = mapperRegistry.getMapperParsers(); - this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(); - indexVersionCreated = indexSettings.getIndexVersionCreated(); + this.indexVersionCreated = indexSettings.getIndexVersionCreated(); + this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated); } public Mapper.TypeParser.ParserContext parserContext(String type) { diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 7ba3013497990..4fc5770709c1b 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -241,6 +241,16 @@ public long getVersion() { return delegate.getVersion(); } + 
@Override + public long getSeqNo() { + return delegate.getSeqNo(); + } + + @Override + public long getPrimaryTerm() { + return delegate.getPrimaryTerm(); + } + @Override public String getRouting() { return fieldValue(RoutingFieldMapper.NAME); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java index a3901bb7a568b..dc8d69ff4f0d1 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ScrollableHitSource.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.search.SearchHit; import org.elasticsearch.threadpool.ThreadPool; @@ -190,6 +191,17 @@ public interface Hit { * internal APIs. */ long getVersion(); + + /** + * The sequence number of the match or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} if sequence numbers weren't requested. + */ + long getSeqNo(); + + /** + * The primary term of the match or {@link SequenceNumbers#UNASSIGNED_PRIMARY_TERM} if sequence numbers weren't requested. + */ + long getPrimaryTerm(); + /** * The source of the hit. Returns null if the source didn't come back from the search, usually because it source wasn't stored at * all. 
@@ -217,6 +229,8 @@ public static class BasicHit implements Hit { private BytesReference source; private XContentType xContentType; private String routing; + private long seqNo; + private long primaryTerm; public BasicHit(String index, String type, String id, long version) { this.index = index; @@ -245,6 +259,16 @@ public long getVersion() { return version; } + @Override + public long getSeqNo() { + return seqNo; + } + + @Override + public long getPrimaryTerm() { + return primaryTerm; + } + @Override public BytesReference getSource() { return source; @@ -270,6 +294,14 @@ public BasicHit setRouting(String routing) { this.routing = routing; return this; } + + public void setSeqNo(long seqNo) { + this.seqNo = seqNo; + } + + public void setPrimaryTerm(long primaryTerm) { + this.primaryTerm = primaryTerm; + } } /** diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java index 13e3381b11553..24d144d810d9c 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLease.java @@ -101,7 +101,7 @@ public RetentionLease(final String id, final long retainingSequenceNumber, final // retention lease IDs can not contain these characters because they are used in encoding retention leases throw new IllegalArgumentException("retention lease ID can not contain any of [:;,] but was [" + id + "]"); } - if (retainingSequenceNumber < SequenceNumbers.UNASSIGNED_SEQ_NO) { + if (retainingSequenceNumber < 0) { throw new IllegalArgumentException("retention lease retaining sequence number [" + retainingSequenceNumber + "] out of range"); } if (timestamp < 0) { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index fa42776403dca..cca63c015f1c7 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -29,6 +29,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -1382,8 +1383,8 @@ public Function> getFieldFilter() { /** * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise. */ - public boolean isMetaDataField(String field) { - return mapperRegistry.isMetaDataField(field); + public boolean isMetaDataField(Version indexCreatedVersion, String field) { + return mapperRegistry.isMetaDataField(indexCreatedVersion, field); } /** diff --git a/server/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java index 41d563c2037ea..c79da36200e4f 100644 --- a/server/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/elasticsearch/indices/mapper/MapperRegistry.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.mapper; +import org.elasticsearch.Version; +import org.elasticsearch.index.mapper.AllFieldMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.plugins.MapperPlugin; @@ -36,6 +38,7 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; + private final Map metadataMapperParsers6x; private final Function> fieldFilter; @@ -43,6 +46,11 @@ public MapperRegistry(Map mapperParsers, Map metadataMapperParsers, Function> fieldFilter) { 
this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); + // add the _all field mapper for indices created in 6x + Map metadata6x = new LinkedHashMap<>(); + metadata6x.put(AllFieldMapper.NAME, new AllFieldMapper.TypeParser()); + metadata6x.putAll(metadataMapperParsers); + this.metadataMapperParsers6x = Collections.unmodifiableMap(metadata6x); this.fieldFilter = fieldFilter; } @@ -58,15 +66,15 @@ public Map getMapperParsers() { * Return a map of the meta mappers that have been registered. The * returned map uses the name of the field as a key. */ - public Map getMetadataMapperParsers() { - return metadataMapperParsers; + public Map getMetadataMapperParsers(Version indexCreatedVersion) { + return indexCreatedVersion.onOrAfter(Version.V_7_0_0) ? metadataMapperParsers : metadataMapperParsers6x; } /** - * Returns true if the provide field is a registered metadata field, false otherwise + * Returns true if the provided field is a registered metadata field, false otherwise */ - public boolean isMetaDataField(String field) { - return getMetadataMapperParsers().containsKey(field); + public boolean isMetaDataField(Version indexCreatedVersion, String field) { + return getMetadataMapperParsers(indexCreatedVersion).containsKey(field); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 0d6d46e95b602..8cdf9e62b1096 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -25,11 +25,11 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.AliasMetaData; 
+import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; @@ -41,14 +41,12 @@ import org.elasticsearch.rest.action.RestBuilderListener; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; import java.util.SortedSet; -import java.util.stream.Collectors; +import java.util.TreeSet; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; @@ -75,6 +73,94 @@ public String getName() { return "get_aliases_action"; } + static RestResponse buildRestResponse(boolean aliasesExplicitlyRequested, String[] requestedAliases, + ImmutableOpenMap> responseAliasMap, XContentBuilder builder) throws Exception { + final Set indicesToDisplay = new HashSet<>(); + final Set returnedAliasNames = new HashSet<>(); + for (final ObjectObjectCursor> cursor : responseAliasMap) { + for (final AliasMetaData aliasMetaData : cursor.value) { + if (aliasesExplicitlyRequested) { + // only display indices that have aliases + indicesToDisplay.add(cursor.key); + } + returnedAliasNames.add(aliasMetaData.alias()); + } + } + // compute explicitly requested aliases that have are not returned in the result + final SortedSet missingAliases = new TreeSet<>(); + // first wildcard index, leading "-" as an alias name after this index means + // that it is an exclusion + int firstWildcardIndex = requestedAliases.length; + for (int i = 0; i < requestedAliases.length; i++) { + if (Regex.isSimpleMatchPattern(requestedAliases[i])) { + 
firstWildcardIndex = i; + break; + } + } + for (int i = 0; i < requestedAliases.length; i++) { + if (MetaData.ALL.equals(requestedAliases[i]) || Regex.isSimpleMatchPattern(requestedAliases[i]) + || (i > firstWildcardIndex && requestedAliases[i].charAt(0) == '-')) { + // only explicitly requested aliases will be called out as missing (404) + continue; + } + // check if aliases[i] is subsequently excluded + int j = Math.max(i + 1, firstWildcardIndex); + for (; j < requestedAliases.length; j++) { + if (requestedAliases[j].charAt(0) == '-') { + // this is an exclude pattern + if (Regex.simpleMatch(requestedAliases[j].substring(1), requestedAliases[i]) + || MetaData.ALL.equals(requestedAliases[j].substring(1))) { + // aliases[i] is excluded by aliases[j] + break; + } + } + } + if (j == requestedAliases.length) { + // explicitly requested aliases[i] is not excluded by any subsequent "-" wildcard in expression + if (false == returnedAliasNames.contains(requestedAliases[i])) { + // aliases[i] is not in the result set + missingAliases.add(requestedAliases[i]); + } + } + } + + final RestStatus status; + builder.startObject(); + { + if (missingAliases.isEmpty()) { + status = RestStatus.OK; + } else { + status = RestStatus.NOT_FOUND; + final String message; + if (missingAliases.size() == 1) { + message = String.format(Locale.ROOT, "alias [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases)); + } else { + message = String.format(Locale.ROOT, "aliases [%s] missing", Strings.collectionToCommaDelimitedString(missingAliases)); + } + builder.field("error", message); + builder.field("status", status.getStatus()); + } + + for (final ObjectObjectCursor> entry : responseAliasMap) { + if (aliasesExplicitlyRequested == false || (aliasesExplicitlyRequested && indicesToDisplay.contains(entry.key))) { + builder.startObject(entry.key); + { + builder.startObject("aliases"); + { + for (final AliasMetaData alias : entry.value) { + AliasMetaData.Builder.toXContent(alias, 
builder, ToXContent.EMPTY_PARAMS); + } + } + builder.endObject(); + } + builder.endObject(); + } + } + } + builder.endObject(); + return new BytesRestResponse(status, builder); + } + @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { // The TransportGetAliasesAction was improved do the same post processing as is happening here. @@ -94,76 +180,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener(channel) { @Override public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { - final ImmutableOpenMap> aliasMap = response.getAliases(); - - final Set aliasNames = new HashSet<>(); - final Set indicesToDisplay = new HashSet<>(); - for (final ObjectObjectCursor> cursor : aliasMap) { - for (final AliasMetaData aliasMetaData : cursor.value) { - aliasNames.add(aliasMetaData.alias()); - if (namesProvided) { - indicesToDisplay.add(cursor.key); - } - } - } - - // first remove requested aliases that are exact matches - final SortedSet difference = Sets.sortedDifference(Arrays.stream(aliases).collect(Collectors.toSet()), aliasNames); - - // now remove requested aliases that contain wildcards that are simple matches - final List matches = new ArrayList<>(); - outer: - for (final String pattern : difference) { - if (pattern.contains("*")) { - for (final String aliasName : aliasNames) { - if (Regex.simpleMatch(pattern, aliasName)) { - matches.add(pattern); - continue outer; - } - } - } - } - difference.removeAll(matches); - - final RestStatus status; - builder.startObject(); - { - if (difference.isEmpty()) { - status = RestStatus.OK; - } else { - status = RestStatus.NOT_FOUND; - final String message; - if (difference.size() == 1) { - message = String.format(Locale.ROOT, "alias [%s] missing", - 
Strings.collectionToCommaDelimitedString(difference)); - } else { - message = String.format(Locale.ROOT, "aliases [%s] missing", - Strings.collectionToCommaDelimitedString(difference)); - } - builder.field("error", message); - builder.field("status", status.getStatus()); - } - - for (final ObjectObjectCursor> entry : response.getAliases()) { - if (namesProvided == false || (namesProvided && indicesToDisplay.contains(entry.key))) { - builder.startObject(entry.key); - { - builder.startObject("aliases"); - { - for (final AliasMetaData alias : entry.value) { - AliasMetaData.Builder.toXContent(alias, builder, ToXContent.EMPTY_PARAMS); - } - } - builder.endObject(); - } - builder.endObject(); - } - } - } - builder.endObject(); - return new BytesRestResponse(status, builder); + return buildRestResponse(namesProvided, aliases, response.getAliases(), builder); } - }); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java index 50370797aa6f2..707378eec4cf6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndexTemplateAction.java @@ -19,10 +19,12 @@ package org.elasticsearch.rest.action.admin.indices; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.rest.BaseRestHandler; @@ -47,6 +49,10 @@ public class RestGetIndexTemplateAction extends BaseRestHandler { 
private static final Set RESPONSE_PARAMETERS = Collections.unmodifiableSet(Sets.union( Collections.singleton(INCLUDE_TYPE_NAME_PARAMETER), Settings.FORMAT_PARAMS)); + private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + LogManager.getLogger(RestGetIndexTemplateAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying include_type_name in get index template requests is deprecated."; public RestGetIndexTemplateAction(final Settings settings, final RestController controller) { super(settings); @@ -65,6 +71,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String[] names = Strings.splitStringByCommaToArray(request.param("name")); final GetIndexTemplatesRequest getIndexTemplatesRequest = new GetIndexTemplatesRequest(names); + if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { + deprecationLogger.deprecatedAndMaybeLog("get_index_template_include_type_name", TYPES_DEPRECATION_MESSAGE); + } getIndexTemplatesRequest.local(request.paramAsBoolean("local", getIndexTemplatesRequest.local())); getIndexTemplatesRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getIndexTemplatesRequest.masterNodeTimeout())); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java index 2b72a724a8906..445d393fef82b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateAction.java @@ -42,6 +42,9 @@ public class RestPutIndexTemplateAction extends BaseRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger( LogManager.getLogger(RestPutIndexTemplateAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types 
removal]" + + " Specifying include_type_name in put index template requests is deprecated."+ + " The parameter will be removed in the next major version."; public RestPutIndexTemplateAction(Settings settings, RestController controller) { super(settings); @@ -57,6 +60,9 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { PutIndexTemplateRequest putRequest = new PutIndexTemplateRequest(request.param("name")); + if (request.hasParam(INCLUDE_TYPE_NAME_PARAMETER)) { + deprecationLogger.deprecatedAndMaybeLog("put_index_template_with_types", TYPES_DEPRECATION_MESSAGE); + } if (request.hasParam("template")) { deprecationLogger.deprecated("Deprecated parameter[template] used, replaced by [index_patterns]"); putRequest.patterns(Collections.singletonList(request.param("template"))); diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index df82bbec59900..4cf3bda83530b 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -80,17 +79,17 @@ * * @see SearchHits */ -public final class SearchHit implements Streamable, ToXContentObject, Iterable { +public final class SearchHit implements Writeable, ToXContentObject, Iterable { - private transient int docId; + private final transient int docId; private static final float DEFAULT_SCORE = Float.NaN; private float score = DEFAULT_SCORE; - private Text id; - private Text 
type; + private final Text id; + private final Text type; - private NestedIdentity nestedIdentity; + private final NestedIdentity nestedIdentity; private long version = -1; private long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -98,7 +97,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable fields = emptyMap(); + private Map fields; private Map highlightFields = null; @@ -121,10 +120,6 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable innerHits; - SearchHit() { - - } - //used only in tests public SearchHit(int docId) { this(docId, null, null, null); @@ -146,6 +141,134 @@ public SearchHit(int nestedTopDocId, String id, Text type, NestedIdentity nested this.fields = fields; } + public SearchHit(StreamInput in) throws IOException { + docId = -1; + score = in.readFloat(); + id = in.readOptionalText(); + type = in.readOptionalText(); + nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); + version = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + } + source = in.readBytesReference(); + if (source.length() == 0) { + source = null; + } + if (in.readBoolean()) { + explanation = readExplanation(in); + } + int size = in.readVInt(); + if (size == 0) { + fields = emptyMap(); + } else if (size == 1) { + DocumentField hitField = DocumentField.readDocumentField(in); + fields = singletonMap(hitField.getName(), hitField); + } else { + Map fields = new HashMap<>(); + for (int i = 0; i < size; i++) { + DocumentField hitField = DocumentField.readDocumentField(in); + fields.put(hitField.getName(), hitField); + } + this.fields = unmodifiableMap(fields); + } + + size = in.readVInt(); + if (size == 0) { + highlightFields = emptyMap(); + } else if (size == 1) { + HighlightField field = readHighlightField(in); + highlightFields = singletonMap(field.name(), field); + } else { + Map highlightFields = new HashMap<>(); + for (int i = 0; i < 
size; i++) { + HighlightField field = readHighlightField(in); + highlightFields.put(field.name(), field); + } + this.highlightFields = unmodifiableMap(highlightFields); + } + + sortValues = new SearchSortValues(in); + + size = in.readVInt(); + if (size > 0) { + matchedQueries = new String[size]; + for (int i = 0; i < size; i++) { + matchedQueries[i] = in.readString(); + } + } + // we call the setter here because that also sets the local index parameter + shard(in.readOptionalWriteable(SearchShardTarget::new)); + size = in.readVInt(); + if (size > 0) { + innerHits = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + SearchHits value = new SearchHits(in); + innerHits.put(key, value); + } + } else { + innerHits = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeFloat(score); + out.writeOptionalText(id); + out.writeOptionalText(type); + out.writeOptionalWriteable(nestedIdentity); + out.writeLong(version); + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { + out.writeZLong(seqNo); + out.writeVLong(primaryTerm); + } + out.writeBytesReference(source); + if (explanation == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + writeExplanation(out, explanation); + } + if (fields == null) { + out.writeVInt(0); + } else { + out.writeVInt(fields.size()); + for (DocumentField hitField : getFields().values()) { + hitField.writeTo(out); + } + } + if (highlightFields == null) { + out.writeVInt(0); + } else { + out.writeVInt(highlightFields.size()); + for (HighlightField highlightField : highlightFields.values()) { + highlightField.writeTo(out); + } + } + sortValues.writeTo(out); + + if (matchedQueries.length == 0) { + out.writeVInt(0); + } else { + out.writeVInt(matchedQueries.length); + for (String matchedFilter : matchedQueries) { + out.writeString(matchedFilter); + } + } + out.writeOptionalWriteable(shard); + if (innerHits == null) { + out.writeVInt(0); + } else { + 
out.writeVInt(innerHits.size()); + for (Map.Entry entry : innerHits.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + } + public int docId() { return this.docId; } @@ -771,140 +894,6 @@ private void buildExplanation(XContentBuilder builder, Explanation explanation) builder.endObject(); } - public static SearchHit readSearchHit(StreamInput in) throws IOException { - SearchHit hit = new SearchHit(); - hit.readFrom(in); - return hit; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - score = in.readFloat(); - id = in.readOptionalText(); - type = in.readOptionalText(); - nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); - version = in.readLong(); - if (in.getVersion().onOrAfter(Version.V_6_7_0)) { - seqNo = in.readZLong(); - primaryTerm = in.readVLong(); - } - source = in.readBytesReference(); - if (source.length() == 0) { - source = null; - } - if (in.readBoolean()) { - explanation = readExplanation(in); - } - int size = in.readVInt(); - if (size == 0) { - fields = emptyMap(); - } else if (size == 1) { - DocumentField hitField = DocumentField.readDocumentField(in); - fields = singletonMap(hitField.getName(), hitField); - } else { - Map fields = new HashMap<>(); - for (int i = 0; i < size; i++) { - DocumentField hitField = DocumentField.readDocumentField(in); - fields.put(hitField.getName(), hitField); - } - this.fields = unmodifiableMap(fields); - } - - size = in.readVInt(); - if (size == 0) { - highlightFields = emptyMap(); - } else if (size == 1) { - HighlightField field = readHighlightField(in); - highlightFields = singletonMap(field.name(), field); - } else { - Map highlightFields = new HashMap<>(); - for (int i = 0; i < size; i++) { - HighlightField field = readHighlightField(in); - highlightFields.put(field.name(), field); - } - this.highlightFields = unmodifiableMap(highlightFields); - } - - sortValues = new SearchSortValues(in); - - size = in.readVInt(); - if (size > 0) { - 
matchedQueries = new String[size]; - for (int i = 0; i < size; i++) { - matchedQueries[i] = in.readString(); - } - } - // we call the setter here because that also sets the local index parameter - shard(in.readOptionalWriteable(SearchShardTarget::new)); - size = in.readVInt(); - if (size > 0) { - innerHits = new HashMap<>(size); - for (int i = 0; i < size; i++) { - String key = in.readString(); - SearchHits value = SearchHits.readSearchHits(in); - innerHits.put(key, value); - } - } else { - innerHits = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeFloat(score); - out.writeOptionalText(id); - out.writeOptionalText(type); - out.writeOptionalWriteable(nestedIdentity); - out.writeLong(version); - if (out.getVersion().onOrAfter(Version.V_6_7_0)) { - out.writeZLong(seqNo); - out.writeVLong(primaryTerm); - } - out.writeBytesReference(source); - if (explanation == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - writeExplanation(out, explanation); - } - if (fields == null) { - out.writeVInt(0); - } else { - out.writeVInt(fields.size()); - for (DocumentField hitField : getFields().values()) { - hitField.writeTo(out); - } - } - if (highlightFields == null) { - out.writeVInt(0); - } else { - out.writeVInt(highlightFields.size()); - for (HighlightField highlightField : highlightFields.values()) { - highlightField.writeTo(out); - } - } - sortValues.writeTo(out); - - if (matchedQueries.length == 0) { - out.writeVInt(0); - } else { - out.writeVInt(matchedQueries.length); - for (String matchedFilter : matchedQueries) { - out.writeString(matchedFilter); - } - } - out.writeOptionalWriteable(shard); - if (innerHits == null) { - out.writeVInt(0); - } else { - out.writeVInt(innerHits.size()); - for (Map.Entry entry : innerHits.entrySet()) { - out.writeString(entry.getKey()); - entry.getValue().writeTo(out); - } - } - } - @Override public boolean equals(Object obj) { if (obj == null || getClass() != 
obj.getClass()) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index f04183ffde700..93478c94048a2 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.xcontent.ToXContentFragment; @@ -43,7 +42,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -public final class SearchHits implements Streamable, ToXContentFragment, Iterable { +public final class SearchHits implements Writeable, ToXContentFragment, Iterable { public static SearchHits empty() { return empty(true); } @@ -55,22 +54,15 @@ public static SearchHits empty(boolean withTotalHits) { public static final SearchHit[] EMPTY = new SearchHit[0]; - private SearchHit[] hits; - - private Total totalHits; - - private float maxScore; - + private final SearchHit[] hits; + private final Total totalHits; + private final float maxScore; @Nullable - private SortField[] sortFields; + private final SortField[] sortFields; @Nullable - private String collapseField; + private final String collapseField; @Nullable - private Object[] collapseValues; - - SearchHits() { - - } + private final Object[] collapseValues; public SearchHits(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) { this(hits, totalHits, maxScore, null, null, null); @@ -86,6 +78,55 @@ public SearchHits(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScor this.collapseValues = collapseValues; } + public SearchHits(StreamInput in) throws IOException { + if 
(in.readBoolean()) { + totalHits = new Total(in); + } else { + // track_total_hits is false + totalHits = null; + } + maxScore = in.readFloat(); + int size = in.readVInt(); + if (size == 0) { + hits = EMPTY; + } else { + hits = new SearchHit[size]; + for (int i = 0; i < hits.length; i++) { + hits[i] = new SearchHit(in); + } + } + if (in.getVersion().onOrAfter(Version.V_6_6_0)) { + sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); + collapseField = in.readOptionalString(); + collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); + } else { + sortFields = null; + collapseField = null; + collapseValues = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + final boolean hasTotalHits = totalHits != null; + out.writeBoolean(hasTotalHits); + if (hasTotalHits) { + totalHits.writeTo(out); + } + out.writeFloat(maxScore); + out.writeVInt(hits.length); + if (hits.length > 0) { + for (SearchHit hit : hits) { + hit.writeTo(out); + } + } + if (out.getVersion().onOrAfter(Version.V_6_6_0)) { + out.writeOptionalArray(Lucene::writeSortField, sortFields); + out.writeOptionalString(collapseField); + out.writeOptionalArray(Lucene::writeSortValue, collapseValues); + } + } + /** * The total number of hits for the query or null if the tracking of total hits * is disabled in the request. 
@@ -222,58 +263,6 @@ public static SearchHits fromXContent(XContentParser parser) throws IOException return new SearchHits(hits.toArray(new SearchHit[0]), totalHits, maxScore); } - public static SearchHits readSearchHits(StreamInput in) throws IOException { - SearchHits hits = new SearchHits(); - hits.readFrom(in); - return hits; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - if (in.readBoolean()) { - totalHits = new Total(in); - } else { - // track_total_hits is false - totalHits = null; - } - maxScore = in.readFloat(); - int size = in.readVInt(); - if (size == 0) { - hits = EMPTY; - } else { - hits = new SearchHit[size]; - for (int i = 0; i < hits.length; i++) { - hits[i] = SearchHit.readSearchHit(in); - } - } - if (in.getVersion().onOrAfter(Version.V_6_6_0)) { - sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); - collapseField = in.readOptionalString(); - collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - final boolean hasTotalHits = totalHits != null; - out.writeBoolean(hasTotalHits); - if (hasTotalHits) { - totalHits.writeTo(out); - } - out.writeFloat(maxScore); - out.writeVInt(hits.length); - if (hits.length > 0) { - for (SearchHit hit : hits) { - hit.writeTo(out); - } - } - if (out.getVersion().onOrAfter(Version.V_6_6_0)) { - out.writeOptionalArray(Lucene::writeSortField, sortFields); - out.writeOptionalString(collapseField); - out.writeOptionalArray(Lucene::writeSortValue, collapseValues); - } - } - @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 60ae4c78edc7e..4a266ee0703bd 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -65,7 +65,7 @@ public InternalTopHits(StreamInput in) throws IOException { from = in.readVInt(); size = in.readVInt(); topDocs = Lucene.readTopDocs(in); - searchHits = SearchHits.readSearchHits(in); + searchHits = new SearchHits(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index 12391151861d0..400ab3623c0e8 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -22,9 +22,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.query.QuerySearchResult; import java.io.IOException; @@ -92,7 +92,7 @@ public static FetchSearchResult readFetchSearchResult(StreamInput in) throws IOE public void readFrom(StreamInput in) throws IOException { super.readFrom(in); requestId = in.readLong(); - hits = SearchHits.readSearchHits(in); + hits = new SearchHits(in); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java index cf1596fd326b9..daafe0970290b 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsContext.java @@ -38,8 +38,6 @@ */ public class 
DocValueFieldsContext { - public static final String USE_DEFAULT_FORMAT = "use_field_mapping"; - /** * Wrapper around a field name and the format that should be used to * display values of this field. diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index eae3188d865b8..e94ac0fdf6c99 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; -import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -46,7 +45,6 @@ import java.util.HashMap; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; /** * Query sub phase which pulls data from doc values @@ -55,7 +53,8 @@ */ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase { - private static final DeprecationLogger deprecationLogger = new DeprecationLogger( + private static final String USE_DEFAULT_FORMAT = "use_field_mapping"; + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger( LogManager.getLogger(DocValueFieldsFetchSubPhase.class)); @Override @@ -66,9 +65,9 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept String name = context.collapse().getFieldName(); if (context.docValueFieldsContext() == null) { context.docValueFieldsContext(new DocValueFieldsContext( - Collections.singletonList(new FieldAndFormat(name, 
DocValueFieldsContext.USE_DEFAULT_FORMAT)))); + Collections.singletonList(new FieldAndFormat(name, null)))); } else if (context.docValueFieldsContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) { - context.docValueFieldsContext().fields().add(new FieldAndFormat(name, DocValueFieldsContext.USE_DEFAULT_FORMAT)); + context.docValueFieldsContext().fields().add(new FieldAndFormat(name, null)); } } @@ -79,13 +78,13 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept hits = hits.clone(); // don't modify the incoming hits Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); - List noFormatFields = context.docValueFieldsContext().fields().stream().filter(f -> f.format == null).map(f -> f.field) - .collect(Collectors.toList()); - if (noFormatFields.isEmpty() == false) { - deprecationLogger.deprecated("There are doc-value fields which are not using a format. The output will " - + "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " - + "[format={}] with a doc value field in order to opt in for the future behaviour and ease the migration to " - + "7.0: {}", DocValueFieldsContext.USE_DEFAULT_FORMAT, noFormatFields); + if (context.docValueFieldsContext().fields().stream() + .map(f -> f.format) + .filter(USE_DEFAULT_FORMAT::equals) + .findAny() + .isPresent()) { + DEPRECATION_LOGGER.deprecated("[" + USE_DEFAULT_FORMAT + "] is a special format that was only used to " + + "ease the transition to 7.x. 
It has become the default and shouldn't be set explicitly anymore."); } for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { @@ -93,19 +92,14 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept MappedFieldType fieldType = context.mapperService().fullName(field); if (fieldType != null) { final IndexFieldData indexFieldData = context.getForField(fieldType); - final DocValueFormat format; - if (fieldAndFormat.format == null) { - format = null; - } else { - String formatDesc = fieldAndFormat.format; - if (Objects.equals(formatDesc, DocValueFieldsContext.USE_DEFAULT_FORMAT)) { - formatDesc = null; - } - format = fieldType.docValueFormat(formatDesc, null); + String formatDesc = fieldAndFormat.format; + if (Objects.equals(formatDesc, USE_DEFAULT_FORMAT)) { + // TODO: Remove in 8.x + formatDesc = null; } + final DocValueFormat format = fieldType.docValueFormat(formatDesc, null); LeafReaderContext subReaderContext = null; AtomicFieldData data = null; - ScriptDocValues scriptValues = null; // legacy SortedBinaryDocValues binaryValues = null; // binary / string / ip fields SortedNumericDocValues longValues = null; // int / date fields SortedNumericDoubleValues doubleValues = null; // floating-point fields @@ -115,9 +109,7 @@ public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); data = indexFieldData.load(subReaderContext); - if (format == null) { - scriptValues = data.getLegacyFieldValues(); - } else if (indexFieldData instanceof IndexNumericFieldData) { + if (indexFieldData instanceof IndexNumericFieldData) { if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) { doubleValues = ((AtomicNumericFieldData) data).getDoubleValues(); } else { @@ -138,10 +130,7 @@ public void 
hitsExecute(SearchContext context, SearchHit[] hits) throws IOExcept final List values = hitField.getValues(); int subDocId = hit.docId() - subReaderContext.docBase; - if (scriptValues != null) { - scriptValues.setNextDocId(subDocId); - values.addAll(scriptValues); - } else if (binaryValues != null) { + if (binaryValues != null) { if (binaryValues.advanceExact(subDocId)) { for (int i = 0, count = binaryValues.docValueCount(); i < count; ++i) { values.add(format.format(binaryValues.nextValue())); diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index e78ce7f3fb194..bac7b6486bb88 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -51,7 +51,7 @@ public InternalSearchResponse(SearchHits hits, InternalAggregations aggregations public InternalSearchResponse(StreamInput in) throws IOException { super( - SearchHits.readSearchHits(in), + new SearchHits(in), in.readBoolean() ? InternalAggregations.readAggregations(in) : null, in.readBoolean() ? 
new Suggest(in) : null, in.readBoolean(), diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index 34c191df3b36a..bc2e2abdb7d03 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -292,7 +292,7 @@ public Option(StreamInput in) throws IOException { super(in); this.doc = Lucene.readScoreDoc(in); if (in.readBoolean()) { - this.hit = SearchHit.readSearchHit(in); + this.hit = new SearchHit(in); } int contextSize = in.readInt(); this.contexts = new LinkedHashMap<>(contextSize); diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java index 3dfb38bef9dd4..30c642dcdeb3b 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/TopSuggestGroupDocsCollector.java @@ -60,7 +60,9 @@ public void collect(int docID, CharSequence key, CharSequence context, float sco int globalDoc = docID + docBase; boolean isNewDoc = docContexts.containsKey(globalDoc) == false; List contexts = docContexts.computeIfAbsent(globalDoc, k -> new ArrayList<>()); - contexts.add(context); + if (context != null) { + contexts.add(context); + } if (isNewDoc) { super.collect(docID, key, context, score); } diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 328950e4f3569..d9de69a1c6c62 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -252,6 +252,9 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL assertTrue(request.requests().stream().allMatch((r) -> version == r.source().version())); assertTrue(request.requests().stream().allMatch((r) -> seqNoAndTerm == r.source().seqNoAndPrimaryTerm())); assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter()))); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().fetchSource() == false)); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().includes().length == 0)); + assertTrue(request.requests().stream().allMatch((r) -> r.source().fetchSource().excludes().length == 0)); } }; mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder() @@ -259,6 +262,7 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL new CollapseBuilder("someField") .setInnerHits(new InnerHitBuilder().setName("foobarbaz").setVersion(version).setSeqNoAndPrimaryTerm(seqNoAndTerm)) ) + .fetchSource(false) .postFilter(QueryBuilders.existsQuery("foo"))) .preference("foobar") .routing("baz"); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 518a60ffe38f6..f2a87d09eb1f2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -310,6 +310,32 @@ public void testWriteIndexValidationException() throws Exception { assertThat(exception.getMessage(), startsWith("alias [alias1] has more than one write index [")); } + public void testTypelessTemplateWithTypedIndexCreation() throws Exception { + addMatchingTemplate(builder -> builder.putMapping("type", "{\"type\": {}}")); + 
setupRequestMapping(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent("{\"_doc\":{}}")); + executeTask(); + assertThat(getMappingsFromResponse(), Matchers.hasKey(MapperService.SINGLE_MAPPING_NAME)); + } + + public void testTypedTemplateWithTypelessIndexCreation() throws Exception { + addMatchingTemplate(builder -> builder.putMapping(MapperService.SINGLE_MAPPING_NAME, "{\"_doc\": {}}")); + setupRequestMapping("type", new CompressedXContent("{\"type\":{}}")); + executeTask(); + assertThat(getMappingsFromResponse(), Matchers.hasKey("type")); + } + + public void testTypedTemplate() throws Exception { + addMatchingTemplate(builder -> builder.putMapping("type", "{\"type\": {}}")); + executeTask(); + assertThat(getMappingsFromResponse(), Matchers.hasKey("type")); + } + + public void testTypelessTemplate() throws Exception { + addMatchingTemplate(builder -> builder.putMapping(MapperService.SINGLE_MAPPING_NAME, "{\"_doc\": {}}")); + executeTask(); + assertThat(getMappingsFromResponse(), Matchers.hasKey(MapperService.SINGLE_MAPPING_NAME)); + } + private IndexRoutingTable createIndexRoutingTableWithStartedShards(Index index) { final IndexRoutingTable idxRoutingTable = mock(IndexRoutingTable.class); diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index c7abea63be081..423592d6d18d7 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -97,18 +97,21 @@ public void testDuellingFormatsValidParsing() { assertSameDate("20181126T121212+0100", "basic_date_time_no_millis"); assertSameDate("2018363", "basic_ordinal_date"); assertSameDate("2018363T121212.123Z", "basic_ordinal_date_time"); + assertSameDate("2018363T121212.123456789Z", "basic_ordinal_date_time"); assertSameDate("2018363T121212.123+0100", 
"basic_ordinal_date_time"); assertSameDate("2018363T121212.123+01:00", "basic_ordinal_date_time"); assertSameDate("2018363T121212Z", "basic_ordinal_date_time_no_millis"); assertSameDate("2018363T121212+0100", "basic_ordinal_date_time_no_millis"); assertSameDate("2018363T121212+01:00", "basic_ordinal_date_time_no_millis"); assertSameDate("121212.123Z", "basic_time"); + assertSameDate("121212.123456789Z", "basic_time"); assertSameDate("121212.123+0100", "basic_time"); assertSameDate("121212.123+01:00", "basic_time"); assertSameDate("121212Z", "basic_time_no_millis"); assertSameDate("121212+0100", "basic_time_no_millis"); assertSameDate("121212+01:00", "basic_time_no_millis"); assertSameDate("T121212.123Z", "basic_t_time"); + assertSameDate("T121212.123456789Z", "basic_t_time"); assertSameDate("T121212.123+0100", "basic_t_time"); assertSameDate("T121212.123+01:00", "basic_t_time"); assertSameDate("T121212Z", "basic_t_time_no_millis"); @@ -118,6 +121,7 @@ public void testDuellingFormatsValidParsing() { assertSameDate("1W313", "basic_week_date"); assertSameDate("18W313", "basic_week_date"); assertSameDate("2018W313T121212.123Z", "basic_week_date_time"); + assertSameDate("2018W313T121212.123456789Z", "basic_week_date_time"); assertSameDate("2018W313T121212.123+0100", "basic_week_date_time"); assertSameDate("2018W313T121212.123+01:00", "basic_week_date_time"); assertSameDate("2018W313T121212Z", "basic_week_date_time_no_millis"); @@ -138,7 +142,9 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T12:12:1", "date_hour_minute_second"); assertSameDate("2018-12-31T12:12:12.123", "date_hour_minute_second_fraction"); + assertSameDate("2018-12-31T12:12:12.123456789", "date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123", "date_hour_minute_second_millis"); + assertParseException("2018-12-31T12:12:12.123456789", "date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_millis"); 
assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction"); @@ -148,7 +154,9 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-05-30T20:21", "date_optional_time"); assertSameDate("2018-05-30T20:21:23", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123", "date_optional_time"); + assertSameDate("2018-05-30T20:21:23.123456789", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123Z", "date_optional_time"); + assertSameDate("2018-05-30T20:21:23.123456789Z", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123+0100", "date_optional_time"); assertSameDate("2018-05-30T20:21:23.123+01:00", "date_optional_time"); assertSameDate("2018-12-1", "date_optional_time"); @@ -158,12 +166,14 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T1:15:30", "date_optional_time"); assertSameDate("2018-12-31T10:15:30.123Z", "date_time"); + assertSameDate("2018-12-31T10:15:30.123456789Z", "date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "date_time"); assertSameDate("2018-12-31T10:15:30.123+01:00", "date_time"); assertSameDate("2018-12-31T10:15:30.11Z", "date_time"); assertSameDate("2018-12-31T10:15:30.11+0100", "date_time"); assertSameDate("2018-12-31T10:15:30.11+01:00", "date_time"); assertSameDate("2018-12-31T10:15:3.123Z", "date_time"); + assertSameDate("2018-12-31T10:15:3.123456789Z", "date_time"); assertSameDate("2018-12-31T10:15:3.123+0100", "date_time"); assertSameDate("2018-12-31T10:15:3.123+01:00", "date_time"); @@ -193,9 +203,11 @@ public void testDuellingFormatsValidParsing() { assertSameDate("12:12:1", "hour_minute_second"); assertSameDate("12:12:12.123", "hour_minute_second_fraction"); + assertSameDate("12:12:12.123456789", "hour_minute_second_fraction"); assertSameDate("12:12:12.1", "hour_minute_second_fraction"); assertParseException("12:12:12", "hour_minute_second_fraction"); assertSameDate("12:12:12.123", "hour_minute_second_millis"); + 
assertParseException("12:12:12.123456789", "hour_minute_second_millis"); assertSameDate("12:12:12.1", "hour_minute_second_millis"); assertParseException("12:12:12", "hour_minute_second_millis"); @@ -203,9 +215,11 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-1", "ordinal_date"); assertSameDate("2018-128T10:15:30.123Z", "ordinal_date_time"); + assertSameDate("2018-128T10:15:30.123456789Z", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+0100", "ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+01:00", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123Z", "ordinal_date_time"); + assertSameDate("2018-1T10:15:30.123456789Z", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123+0100", "ordinal_date_time"); assertSameDate("2018-1T10:15:30.123+01:00", "ordinal_date_time"); @@ -217,6 +231,7 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-1T10:15:30+01:00", "ordinal_date_time_no_millis"); assertSameDate("10:15:30.123Z", "time"); + assertSameDate("10:15:30.123456789Z", "time"); assertSameDate("10:15:30.123+0100", "time"); assertSameDate("10:15:30.123+01:00", "time"); assertSameDate("1:15:30.123Z", "time"); @@ -249,6 +264,7 @@ public void testDuellingFormatsValidParsing() { assertParseException("10:15:3", "time_no_millis"); assertSameDate("T10:15:30.123Z", "t_time"); + assertSameDate("T10:15:30.123456789Z", "t_time"); assertSameDate("T10:15:30.123+0100", "t_time"); assertSameDate("T10:15:30.123+01:00", "t_time"); assertSameDate("T1:15:30.123Z", "t_time"); @@ -286,6 +302,7 @@ public void testDuellingFormatsValidParsing() { assertJavaTimeParseException("2012-W1-8", "week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "week_date_time"); + assertSameDate("2012-W48-6T10:15:30.123456789Z", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+01:00", "week_date_time"); assertSameDate("2012-W1-6T10:15:30.123Z", 
"week_date_time"); @@ -326,9 +343,11 @@ public void testDuelingStrictParsing() { assertSameDate("2018W313", "strict_basic_week_date"); assertParseException("18W313", "strict_basic_week_date"); assertSameDate("2018W313T121212.123Z", "strict_basic_week_date_time"); + assertSameDate("2018W313T121212.123456789Z", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123+0100", "strict_basic_week_date_time"); assertSameDate("2018W313T121212.123+01:00", "strict_basic_week_date_time"); assertParseException("2018W313T12128.123Z", "strict_basic_week_date_time"); + assertParseException("2018W313T12128.123456789Z", "strict_basic_week_date_time"); assertParseException("2018W313T81212.123Z", "strict_basic_week_date_time"); assertParseException("2018W313T12812.123Z", "strict_basic_week_date_time"); assertParseException("2018W313T12812.1Z", "strict_basic_week_date_time"); @@ -354,6 +373,7 @@ public void testDuelingStrictParsing() { assertSameDate("2018-12-31T12:12:12", "strict_date_hour_minute_second"); assertParseException("2018-12-31T12:12:1", "strict_date_hour_minute_second"); assertSameDate("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_fraction"); + assertSameDate("2018-12-31T12:12:12.123456789", "strict_date_hour_minute_second_fraction"); assertSameDate("2018-12-31T12:12:12.123", "strict_date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "strict_date_hour_minute_second_fraction"); @@ -373,6 +393,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-31T9:15:30", "strict_date_optional_time"); assertSameDate("2015-01-04T00:00Z", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30.123Z", "strict_date_time"); + assertSameDate("2018-12-31T10:15:30.123456789Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+01:00", "strict_date_time"); 
assertSameDate("2018-12-31T10:15:30.11Z", "strict_date_time"); @@ -397,6 +418,7 @@ public void testDuelingStrictParsing() { assertSameDate("12:12:01", "strict_hour_minute_second"); assertParseException("12:12:1", "strict_hour_minute_second"); assertSameDate("12:12:12.123", "strict_hour_minute_second_fraction"); + assertSameDate("12:12:12.123456789", "strict_hour_minute_second_fraction"); assertSameDate("12:12:12.1", "strict_hour_minute_second_fraction"); assertParseException("12:12:12", "strict_hour_minute_second_fraction"); assertSameDate("12:12:12.123", "strict_hour_minute_second_millis"); @@ -406,6 +428,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-1", "strict_ordinal_date"); assertSameDate("2018-128T10:15:30.123Z", "strict_ordinal_date_time"); + assertSameDate("2018-128T10:15:30.123456789Z", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+0100", "strict_ordinal_date_time"); assertSameDate("2018-128T10:15:30.123+01:00", "strict_ordinal_date_time"); assertParseException("2018-1T10:15:30.123Z", "strict_ordinal_date_time"); @@ -416,6 +439,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-1T10:15:30Z", "strict_ordinal_date_time_no_millis"); assertSameDate("10:15:30.123Z", "strict_time"); + assertSameDate("10:15:30.123456789Z", "strict_time"); assertSameDate("10:15:30.123+0100", "strict_time"); assertSameDate("10:15:30.123+01:00", "strict_time"); assertParseException("1:15:30.123Z", "strict_time"); @@ -436,6 +460,7 @@ public void testDuelingStrictParsing() { assertParseException("10:15:3", "strict_time_no_millis"); assertSameDate("T10:15:30.123Z", "strict_t_time"); + assertSameDate("T10:15:30.123456789Z", "strict_t_time"); assertSameDate("T10:15:30.123+0100", "strict_t_time"); assertSameDate("T10:15:30.123+01:00", "strict_t_time"); assertParseException("T1:15:30.123Z", "strict_t_time"); @@ -466,6 +491,7 @@ public void testDuelingStrictParsing() { assertJavaTimeParseException("2012-W01-8", 
"strict_week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "strict_week_date_time"); + assertSameDate("2012-W48-6T10:15:30.123456789Z", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+01:00", "strict_week_date_time"); assertParseException("2012-W1-6T10:15:30.123Z", "strict_week_date_time"); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f57bff72fc57c..d984d1702f257 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -5313,7 +5313,7 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { final List leases = new ArrayList<>(length); for (int i = 0; i < length; i++) { final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomLongBetween(0L, Math.max(0L, globalCheckpoint.get())); + final long retainingSequenceNumber = randomLongBetween(0, Math.max(0, globalCheckpoint.get())); final long timestamp = randomLongBetween(0L, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); leases.add(new RetentionLease(id, retainingSequenceNumber, timestamp, source)); diff --git a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java index 310e83e9d2cef..e15372d687e55 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/SoftDeletesPolicyTests.java @@ -49,7 +49,7 @@ public void testSoftDeletesRetentionLock() { AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final AtomicLong[] retainingSequenceNumbers = new AtomicLong[randomIntBetween(0, 8)]; for (int i = 
0; i < retainingSequenceNumbers.length; i++) { - retainingSequenceNumbers[i] = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); + retainingSequenceNumbers[i] = new AtomicLong(); } final Supplier> retentionLeasesSupplier = () -> { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java index 62f3495ee172b..34200b51cb317 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/AllFieldMapperTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.mapper; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -26,9 +28,75 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService.MergeReason; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; + +import static org.hamcrest.CoreMatchers.containsString; public class AllFieldMapperTests extends ESSingleNodeTestCase { + @Override + protected boolean forbidPrivateIndexSettings() { + return false; + } + + public void testAllDisabled() throws Exception { + { + final Version version = VersionUtils.randomVersionBetween(random(), + Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); + IndexService indexService = createIndex("test_6x", + Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, version) + .build() + ); + String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", false) + .endObject().endObject() + ); + indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE); + + String mappingEnabled = 
Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", true) + .endObject().endObject() + ); + MapperParsingException exc = expectThrows(MapperParsingException.class, + () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); + assertThat(exc.getMessage(), containsString("[_all] is disabled in this version.")); + } + { + IndexService indexService = createIndex("test"); + String mappingEnabled = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", true) + .endObject().endObject() + ); + MapperParsingException exc = expectThrows(MapperParsingException.class, + () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingEnabled), MergeReason.MAPPING_UPDATE)); + assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); + + String mappingDisabled = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_all") + .field("enabled", false) + .endObject().endObject() + ); + exc = expectThrows(MapperParsingException.class, + () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingDisabled), MergeReason.MAPPING_UPDATE)); + assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); + + String mappingAll = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_all").endObject().endObject() + ); + exc = expectThrows(MapperParsingException.class, + () -> indexService.mapperService().merge("_doc", new CompressedXContent(mappingAll), MergeReason.MAPPING_UPDATE)); + assertThat(exc.getMessage(), containsString("unsupported parameters: [_all")); + + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().endObject()); + indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + assertEquals("{\"_doc\":{}}", 
indexService.mapperService().documentMapper("_doc").mapping().toString()); + } + } + public void testUpdateDefaultSearchAnalyzer() throws Exception { IndexService indexService = createIndex("test", Settings.builder() .put("index.analysis.analyzer.default_search.type", "custom") diff --git a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java index ad02209bf5dae..a5329856630d5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ExistsQueryBuilderTests.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; @@ -55,8 +54,6 @@ protected ExistsQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { if (randomBoolean()) { fieldPattern = fieldPattern + "*"; - } else { - fieldPattern = MetaData.ALL; } } return new ExistsQueryBuilder(fieldPattern); diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 257ee807419b6..54d478a7f6aec 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import 
org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilderTests; @@ -158,8 +157,7 @@ public static InnerHitBuilder randomInnerHits() { innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); } innerHits.setDocValueFields(randomListStuff(16, - () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), - randomBoolean() ? null : DocValueFieldsContext.USE_DEFAULT_FORMAT))); + () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), null))); // Random script fields deduped on their field name. Map scriptFields = new HashMap<>(); for (SearchSourceBuilder.ScriptField field: randomListStuff(16, InnerHitBuilderTests::randomScript)) { @@ -201,8 +199,7 @@ static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { modifiers.add(() -> { if (randomBoolean()) { copy.setDocValueFields(randomValueOtherThan(copy.getDocValueFields(), - () -> randomListStuff(16, () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), - randomBoolean() ? 
null : DocValueFieldsContext.USE_DEFAULT_FORMAT)))); + () -> randomListStuff(16, () -> new FieldAndFormat(randomAlphaOfLengthBetween(1, 16), null)))); } else { copy.addDocValueField(randomAlphaOfLengthBetween(1, 16)); } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 7a867027412e1..90eb162374469 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -156,7 +156,7 @@ private void runExpirationTest(final boolean primaryMode) { replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); } final long[] retainingSequenceNumbers = new long[1]; - retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retainingSequenceNumbers[0] = randomLongBetween(0, Long.MAX_VALUE); if (primaryMode) { replicationTracker.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); } else { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java index 7d6e5fa2dc5a6..a99d0caea8e6c 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java @@ -67,7 +67,7 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { final Map currentRetentionLeases = new HashMap<>(); for (int i = 0; i < length; i++) { final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); - final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + final long retainingSequenceNumber = 
randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); @@ -119,7 +119,7 @@ public void testRetentionLeasesSyncOnExpiration() throws Exception { final int length = randomIntBetween(1, 8); for (int i = 0; i < length; i++) { final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final String source = randomAlphaOfLength(8); final CountDownLatch latch = new CountDownLatch(1); final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java index 500393f2cfac2..1a8d159c18757 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseTests.java @@ -28,7 +28,6 @@ import java.util.Collection; import java.util.List; -import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; @@ -53,7 +52,7 @@ public void testEmptyId() { } public void testRetainingSequenceNumberOutOfRange() { - final long retainingSequenceNumber = randomLongBetween(Long.MIN_VALUE, UNASSIGNED_SEQ_NO - 1); + final long retainingSequenceNumber = randomLongBetween(Long.MIN_VALUE, -1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> new RetentionLease("id", retainingSequenceNumber, randomNonNegativeLong(), "source")); @@ -66,7 +65,7 @@ public void 
testTimestampOutOfRange() { final long timestamp = randomLongBetween(Long.MIN_VALUE, -1); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new RetentionLease("id", randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE), timestamp, "source")); + () -> new RetentionLease("id", randomNonNegativeLong(), timestamp, "source")); assertThat(e, hasToString(containsString("retention lease timestamp [" + timestamp + "] out of range"))); } @@ -87,7 +86,7 @@ public void testEmptySource() { public void testRetentionLeaseSerialization() throws IOException { final String id = randomAlphaOfLength(8); - final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + final long retainingSequenceNumber = randomLongBetween(0, Long.MAX_VALUE); final long timestamp = randomNonNegativeLong(); final String source = randomAlphaOfLength(8); final RetentionLease retentionLease = new RetentionLease(id, retainingSequenceNumber, timestamp, source); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java index cd7d2a2c12cb8..26e67fb6dd264 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java @@ -113,7 +113,7 @@ private void runExpirationTest(final boolean primary) throws IOException { final IndexShard indexShard = newStartedShard(primary, settings, new InternalEngineFactory()); try { final long[] retainingSequenceNumbers = new long[1]; - retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retainingSequenceNumbers[0] = randomLongBetween(0, Long.MAX_VALUE); if (primary) { indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); } else { diff 
--git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index 9b88c6ab8f20c..f31ac0627138e 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices; +import org.elasticsearch.Version; +import org.elasticsearch.index.mapper.AllFieldMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IgnoredFieldMapper; @@ -36,6 +38,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -87,14 +90,36 @@ public Map getMetadataMappers() { RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME}; + private static String[] EXPECTED_METADATA_FIELDS_6x = new String[]{AllFieldMapper.NAME, IgnoredFieldMapper.NAME, + IdFieldMapper.NAME, RoutingFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, TypeFieldMapper.NAME, + VersionFieldMapper.NAME, SeqNoFieldMapper.NAME, FieldNamesFieldMapper.NAME}; + + public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); - assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty()); - Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); - int i = 0; - for (String field : metadataMapperParsers.keySet()) { - assertEquals(EXPECTED_METADATA_FIELDS[i++], field); + { + Version version = VersionUtils.randomVersionBetween(random(), 
Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); + assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); + Map metadataMapperParsers = + module.getMapperRegistry().getMetadataMapperParsers(version); + assertEquals(EXPECTED_METADATA_FIELDS_6x.length, metadataMapperParsers.size()); + int i = 0; + for (String field : metadataMapperParsers.keySet()) { + assertEquals(EXPECTED_METADATA_FIELDS_6x[i++], field); + } + } + { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT); + assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); + Map metadataMapperParsers = + module.getMapperRegistry().getMetadataMapperParsers(version); + assertEquals(EXPECTED_METADATA_FIELDS.length, metadataMapperParsers.size()); + int i = 0; + for (String field : metadataMapperParsers.keySet()) { + assertEquals(EXPECTED_METADATA_FIELDS[i++], field); + } } } @@ -102,11 +127,15 @@ public void testBuiltinWithPlugins() { IndicesModule noPluginsModule = new IndicesModule(Collections.emptyList()); IndicesModule module = new IndicesModule(fakePlugins); MapperRegistry registry = module.getMapperRegistry(); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0.minimumCompatibilityVersion()); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); - assertThat(registry.getMetadataMapperParsers().size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size())); - Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); + assertThat(registry.getMetadataMapperParsers(version).size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(version).size())); + Map 
metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(version); Iterator iterator = metadataMapperParsers.keySet().iterator(); + if (version.before(Version.V_7_0_0)) { + assertEquals(AllFieldMapper.NAME, iterator.next()); + } assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; while(iterator.hasNext()) { @@ -187,13 +216,15 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); + Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index b68ec6d0598e5..60dbad99795f3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -68,6 +68,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.hamcrest.RegexMatcher; import 
java.io.IOException; @@ -515,9 +516,10 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() { public void testIsMetaDataField() { IndicesService indicesService = getIndicesService(); - assertFalse(indicesService.isMetaDataField(randomAlphaOfLengthBetween(10, 15))); + final Version randVersion = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT); + assertFalse(indicesService.isMetaDataField(randVersion, randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetaDataFields()) { - assertTrue(indicesService.isMetaDataField(builtIn)); + assertTrue(indicesService.isMetaDataField(randVersion, builtIn)); } } diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesActionTests.java new file mode 100644 index 0000000000000..ced52096687a2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesActionTests.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.rest.RestStatus.NOT_FOUND; +import static org.hamcrest.Matchers.equalTo; + +public class RestGetAliasesActionTests extends ESTestCase { + +// # Assumes the following setup +// curl -X PUT "localhost:9200/index" -H "Content-Type: application/json" -d' +// { +// "aliases": { +// "foo": {}, +// "foobar": {} +// } +// }' + + public void testBareRequest() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final AliasMetaData foobarAliasMetaData = AliasMetaData.builder("foobar").build(); + final AliasMetaData fooAliasMetaData = AliasMetaData.builder("foo").build(); + openMapBuilder.put("index", Arrays.asList(fooAliasMetaData, foobarAliasMetaData)); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(false, new String[0], openMapBuilder.build(), + xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{\"index\":{\"aliases\":{\"foo\":{},\"foobar\":{}}}}")); + } + + public void testSimpleAliasWildcardMatchingNothing() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = 
ImmutableOpenMap.builder(); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, new String[] { "baz*" }, openMapBuilder.build(), + xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{}")); + } + + public void testMultipleAliasWildcardsSomeMatching() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final AliasMetaData aliasMetaData = AliasMetaData.builder("foobar").build(); + openMapBuilder.put("index", Arrays.asList(aliasMetaData)); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, new String[] { "baz*", "foobar*" }, + openMapBuilder.build(), xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{\"index\":{\"aliases\":{\"foobar\":{}}}}")); + } + + public void testAliasWildcardsIncludeAndExcludeAll() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, new String[] { "foob*", "-foo*" }, + openMapBuilder.build(), xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{}")); + } + + public void testAliasWildcardsIncludeAndExcludeSome() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final 
ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final AliasMetaData aliasMetaData = AliasMetaData.builder("foo").build(); + openMapBuilder.put("index", Arrays.asList(aliasMetaData)); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, new String[] { "foo*", "-foob*" }, + openMapBuilder.build(), xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{\"index\":{\"aliases\":{\"foo\":{}}}}")); + } + + public void testAliasWildcardsIncludeAndExcludeSomeAndExplicitMissing() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final AliasMetaData aliasMetaData = AliasMetaData.builder("foo").build(); + openMapBuilder.put("index", Arrays.asList(aliasMetaData)); + final String[] aliasPattern; + if (randomBoolean()) { + aliasPattern = new String[] { "missing", "foo*", "-foob*" }; + } else { + aliasPattern = new String[] { "foo*", "-foob*", "missing" }; + } + + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, aliasPattern, openMapBuilder.build(), + xContentBuilder); + assertThat(restResponse.status(), equalTo(NOT_FOUND)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), + equalTo("{\"error\":\"alias [missing] missing\",\"status\":404,\"index\":{\"aliases\":{\"foo\":{}}}}")); + } + + public void testAliasWildcardsExcludeExplicitMissing() throws Exception { + final XContentBuilder xContentBuilder = XContentFactory.contentBuilder(XContentType.JSON); + final ImmutableOpenMap.Builder> openMapBuilder = ImmutableOpenMap.builder(); + final RestResponse restResponse = RestGetAliasesAction.buildRestResponse(true, new 
String[] { "foo", "foofoo", "-foo*" }, + openMapBuilder.build(), xContentBuilder); + assertThat(restResponse.status(), equalTo(OK)); + assertThat(restResponse.contentType(), equalTo("application/json; charset=UTF-8")); + assertThat(restResponse.content().utf8ToString(), equalTo("{}")); + } +} diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java index ac0eb8f0d81a6..d1900aaee84d5 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestPutIndexTemplateActionTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest.action.admin.indices; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,8 +32,12 @@ import org.junit.Before; import java.io.IOException; +import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.rest.BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER; +import static org.mockito.Mockito.mock; + public class RestPutIndexTemplateActionTests extends RestActionTestCase { private RestPutIndexTemplateAction action; @@ -45,7 +50,8 @@ public void testPrepareTypelessRequest() throws IOException { XContentBuilder content = XContentFactory.jsonBuilder().startObject() .startObject("mappings") .startObject("properties") - .startObject("field").field("type", "keyword").endObject() + .startObject("field1").field("type", "keyword").endObject() + .startObject("field2").field("type", "text").endObject() .endObject() .endObject() .startObject("aliases") @@ -58,6 +64,11 @@ public void testPrepareTypelessRequest() throws IOException { .withPath("/_template/_some_template") 
.withContent(BytesReference.bytes(content), XContentType.JSON) .build(); + action.prepareRequest(request, mock(NodeClient.class)); + + // Internally the above prepareRequest method calls prepareRequestSource to inject a + // default type into the mapping. Here we test that this does what is expected by + // explicitly calling that same helper function boolean includeTypeName = false; Map source = action.prepareRequestSource(request, includeTypeName); @@ -65,7 +76,8 @@ public void testPrepareTypelessRequest() throws IOException { .startObject("mappings") .startObject("_doc") .startObject("properties") - .startObject("field").field("type", "keyword").endObject() + .startObject("field1").field("type", "keyword").endObject() + .startObject("field2").field("type", "text").endObject() .endObject() .endObject() .endObject() @@ -78,4 +90,51 @@ public void testPrepareTypelessRequest() throws IOException { assertEquals(expectedContentAsMap, source); } + + public void testIncludeTypeName() throws IOException { + XContentBuilder typedContent = XContentFactory.jsonBuilder().startObject() + .startObject("mappings") + .startObject("my_doc") + .startObject("properties") + .startObject("field1").field("type", "keyword").endObject() + .startObject("field2").field("type", "text").endObject() + .endObject() + .endObject() + .endObject() + .startObject("aliases") + .startObject("read_alias").endObject() + .endObject() + .endObject(); + + Map params = new HashMap<>(); + params.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.PUT) + .withParams(params) + .withPath("/_template/_some_template") + .withContent(BytesReference.bytes(typedContent), XContentType.JSON) + .build(); + action.prepareRequest(request, mock(NodeClient.class)); + assertWarnings(RestPutIndexTemplateAction.TYPES_DEPRECATION_MESSAGE); + boolean includeTypeName = true; + Map source = action.prepareRequestSource(request, 
includeTypeName); + + XContentBuilder expectedContent = XContentFactory.jsonBuilder().startObject() + .startObject("mappings") + .startObject("my_doc") + .startObject("properties") + .startObject("field1").field("type", "keyword").endObject() + .startObject("field2").field("type", "text").endObject() + .endObject() + .endObject() + .endObject() + .startObject("aliases") + .startObject("read_alias").endObject() + .endObject() + .endObject(); + Map expectedContentAsMap = XContentHelper.convertToMap( + BytesReference.bytes(expectedContent), true, expectedContent.contentType()).v2(); + + assertEquals(expectedContentAsMap, source); + } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java index 4831729201183..bbd5b12ec45ee 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,7 +40,7 @@ import org.elasticsearch.search.SearchHit.NestedIdentity; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests; -import org.elasticsearch.test.AbstractStreamableTestCase; +import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.RandomObjects; import org.elasticsearch.test.VersionUtils; @@ -59,7 +60,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class SearchHitTests extends AbstractStreamableTestCase { +public class 
SearchHitTests extends AbstractWireSerializingTestCase { public static SearchHit createTestItem(boolean withOptionalInnerHits, boolean withShardTarget) { return createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); } @@ -139,8 +140,8 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp } @Override - protected SearchHit createBlankInstance() { - return new SearchHit(); + protected Writeable.Reader instanceReader() { + return SearchHit::new; } @Override @@ -246,7 +247,7 @@ public void testSerializeShardTarget() throws Exception { SearchHits hits = new SearchHits(new SearchHit[]{hit1, hit2}, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); Version version = VersionUtils.randomVersion(random()); - SearchHits results = copyStreamable(hits, getNamedWriteableRegistry(), SearchHits::new, version); + SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, version); SearchShardTarget deserializedTarget = results.getAt(0).getShard(); assertThat(deserializedTarget, equalTo(target)); assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java index 396879e8f65bd..9e87628d35d1c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.LuceneTests; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -37,7 +38,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import 
org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; +import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.VersionUtils; import java.io.IOException; @@ -45,7 +46,7 @@ import java.util.Collections; import java.util.function.Predicate; -public class SearchHitsTests extends AbstractStreamableXContentTestCase { +public class SearchHitsTests extends AbstractSerializingTestCase { public static SearchHits createTestItem(boolean withOptionalInnerHits, boolean withShardTarget) { return createTestItem(randomFrom(XContentType.values()), withOptionalInnerHits, withShardTarget); @@ -171,8 +172,8 @@ protected String[] getShuffleFieldsExceptions() { } @Override - protected SearchHits createBlankInstance() { - return new SearchHits(); + protected Writeable.Reader instanceReader() { + return SearchHits::new; } @Override @@ -274,8 +275,7 @@ public void testFromXContentWithShards() throws IOException { public void testReadFromPre6_6_0() throws IOException { try (StreamInput in = StreamInput.wrap(Base64.getDecoder().decode("AQC/gAAAAAA="))) { in.setVersion(VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_6_6_0))); - SearchHits searchHits = new SearchHits(); - searchHits.readFrom(in); + SearchHits searchHits = new SearchHits(in); assertEquals(0, searchHits.getHits().length); assertNotNull(searchHits.getTotalHits()); assertEquals(0L, searchHits.getTotalHits().value); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index fc69df5987aff..81315071273e0 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fields; -import 
org.apache.lucene.util.BytesRef; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -49,7 +48,6 @@ import org.elasticsearch.test.InternalSettingsPlugin; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.joda.time.ReadableDateTime; import org.joda.time.format.DateTimeFormat; import java.time.ZoneOffset; @@ -804,13 +802,12 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - DateTime dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); - assertThat(dateField.getMillis(), equalTo(date.toInstant().toEpochMilli())); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("dateOptionalTime").format(date))); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), - equalTo(new BytesRef(new byte[] {42, 100}))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -830,13 +827,12 @@ public void testDocValueFields() 
throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo((Object) 4L)); assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); - assertThat(dateField.getMillis(), equalTo(date.toInstant().toEpochMilli())); + assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), + equalTo(DateFormatter.forPattern("dateOptionalTime").format(date))); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); - assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), - equalTo(new BytesRef(new byte[] {42, 100}))); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -1001,9 +997,7 @@ public void testDocValueFieldsWithFieldAlias() throws Exception { DocumentField dateField = fields.get("date_field"); assertThat(dateField.getName(), equalTo("date_field")); - - ReadableDateTime fetchedDate = dateField.getValue(); - assertThat(fetchedDate.getMillis(), equalTo(date.toInstant().getMillis())); + assertThat(dateField.getValue(), equalTo("1990-12-29")); } public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { @@ -1065,9 +1059,7 @@ public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { DocumentField 
dateField = fields.get("date_field"); assertThat(dateField.getName(), equalTo("date_field")); - - ReadableDateTime fetchedDate = dateField.getValue(); - assertThat(fetchedDate.getMillis(), equalTo(date.toInstant().getMillis())); + assertThat(dateField.getValue(), equalTo("1990-12-29")); } diff --git a/settings.gradle b/settings.gradle index d5eebd6f66fe2..18f5f63332e00 100644 --- a/settings.gradle +++ b/settings.gradle @@ -16,10 +16,12 @@ List projects = [ 'client:benchmark', 'benchmarks', 'distribution:archives:integ-test-zip', - 'distribution:archives:oss-zip', - 'distribution:archives:zip', - 'distribution:archives:oss-tar', - 'distribution:archives:tar', + 'distribution:archives:oss-windows-zip', + 'distribution:archives:windows-zip', + 'distribution:archives:oss-darwin-tar', + 'distribution:archives:darwin-tar', + 'distribution:archives:oss-linux-tar', + 'distribution:archives:linux-tar', 'distribution:docker', 'distribution:packages:oss-deb', 'distribution:packages:deb', diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 1ff1fa37fc120..469a4cfb4d55e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -406,6 +406,16 @@ public boolean addSendBehavior(TransportAddress transportAddress, StubbableTrans return transport().addSendBehavior(transportAddress, sendBehavior); } + /** + * Adds a send behavior that is the default send behavior. + * + * @return {@code true} if no default send behavior was registered + */ + public boolean addSendBehavior(StubbableTransport.SendRequestBehavior behavior) { + return transport().setDefaultSendBehavior(behavior); + } + + /** * Adds a new connect behavior that is used for creating connections with the given delegate service. 
* diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java index 994037be07a92..41ac87f0af576 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableConnectionManager.java @@ -69,7 +69,9 @@ public boolean setDefaultNodeConnectedBehavior(NodeConnectedBehavior behavior) { } public void clearBehaviors() { + defaultGetConnectionBehavior = ConnectionManager::getConnection; getConnectionBehaviors.clear(); + defaultNodeConnectedBehavior = ConnectionManager::nodeConnected; nodeConnectedBehaviors.clear(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 4f0df85fd50c8..673ed49387570 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -55,6 +55,12 @@ public StubbableTransport(Transport transport) { this.delegate = transport; } + boolean setDefaultSendBehavior(SendRequestBehavior sendBehavior) { + SendRequestBehavior prior = defaultSendRequest; + defaultSendRequest = sendBehavior; + return prior == null; + } + public boolean setDefaultConnectBehavior(OpenConnectionBehavior openConnectionBehavior) { OpenConnectionBehavior prior = this.defaultConnectBehavior; this.defaultConnectBehavior = openConnectionBehavior; @@ -70,7 +76,9 @@ boolean addConnectBehavior(TransportAddress transportAddress, OpenConnectionBeha } void clearBehaviors() { + this.defaultSendRequest = null; sendBehaviors.clear(); + this.defaultConnectBehavior = null; connectBehaviors.clear(); } diff --git 
a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java new file mode 100644 index 0000000000000..84271ce0acaf1 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/XPackUsageIT.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.nullValue; + +public class XPackUsageIT extends ESCCRRestTestCase { + + public void testXPackCcrUsage() throws Exception { + if ("follow".equals(targetCluster) == false) { + logger.info("skipping test, waiting for target cluster [follow]" ); + return; + } + + Map previousUsage = getCcrUsage(); + putAutoFollowPattern("my_pattern", "leader_cluster", "messages-*"); + + // This index should be auto followed: + createLeaderIndex("messages-20200101"); + // This index will be followed manually + createLeaderIndex("my_index"); + followIndex("my_index", "my_index"); + + int previousFollowerIndicesCount = (Integer) previousUsage.get("follower_indices_count"); + int previousAutoFollowPatternsCount = (Integer) previousUsage.get("auto_follow_patterns_count"); + assertBusy(() -> { + Map ccrUsage = getCcrUsage(); + assertThat(ccrUsage.get("follower_indices_count"), equalTo(previousFollowerIndicesCount + 2)); + 
assertThat(ccrUsage.get("auto_follow_patterns_count"), equalTo(previousAutoFollowPatternsCount + 1)); + assertThat((Integer) ccrUsage.get("last_follow_time_in_millis"), greaterThanOrEqualTo(0)); + }); + + deleteAutoFollowPattern("my_pattern"); + pauseFollow("messages-20200101"); + closeIndex("messages-20200101"); + unfollow("messages-20200101"); + + pauseFollow("my_index"); + closeIndex("my_index"); + unfollow("my_index"); + + assertBusy(() -> { + Map ccrUsage = getCcrUsage(); + assertThat(ccrUsage.get("follower_indices_count"), equalTo(previousFollowerIndicesCount)); + assertThat(ccrUsage.get("auto_follow_patterns_count"), equalTo(previousAutoFollowPatternsCount)); + if (previousFollowerIndicesCount == 0) { + assertThat(ccrUsage.get("last_follow_time_in_millis"), nullValue()); + } else { + assertThat((Integer) ccrUsage.get("last_follow_time_in_millis"), greaterThanOrEqualTo(0)); + } + }); + } + + private void createLeaderIndex(String indexName) throws IOException { + try (RestClient leaderClient = buildLeaderClient()) { + Settings settings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .build(); + Request request = new Request("PUT", "/" + indexName); + request.setJsonEntity("{\"settings\": " + Strings.toString(settings) + "}"); + assertOK(leaderClient.performRequest(request)); + } + } + + private Map getCcrUsage() throws IOException { + Request request = new Request("GET", "/_xpack/usage"); + Map response = toMap(client().performRequest(request)); + logger.info("xpack usage response={}", response); + return (Map) response.get("ccr"); + } + +} diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml index f73f5c6dfb2d3..d5cd8ebd4f1ab 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml +++ 
b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml @@ -37,6 +37,7 @@ - do: ccr.follow: index: bar + wait_for_active_shards: 1 body: remote_cluster: local leader_index: foo diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_info.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_info.yml index f1e47d830cf97..8383ecd4e6851 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_info.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_info.yml @@ -33,6 +33,7 @@ - do: ccr.follow: index: bar + wait_for_active_shards: 1 body: remote_cluster: local leader_index: foo diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index 5b3e6c18ef29b..220463a60b258 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -36,6 +36,7 @@ - do: ccr.follow: index: bar + wait_for_active_shards: 1 body: remote_cluster: local leader_index: foo diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml index 60c3b404b6f09..62878437c37e3 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/index_directly_into_follower_index.yml @@ -37,6 +37,7 @@ - do: ccr.follow: index: bar + wait_for_active_shards: 1 body: remote_cluster: local leader_index: foo diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java 
b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index 25fbef7ada73e..656328d5ead9e 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -73,7 +73,7 @@ protected static void followIndex(String leaderCluster, String leaderIndex, Stri } protected static void followIndex(RestClient client, String leaderCluster, String leaderIndex, String followIndex) throws IOException { - final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow"); + final Request request = new Request("PUT", "/" + followIndex + "/_ccr/follow?wait_for_active_shards=1"); request.setJsonEntity("{\"remote_cluster\": \"" + leaderCluster + "\", \"leader_index\": \"" + leaderIndex + "\", \"read_poll_timeout\": \"10ms\"}"); assertOK(client.performRequest(request)); @@ -87,6 +87,22 @@ protected static void pauseFollow(RestClient client, String followIndex) throws assertOK(client.performRequest(new Request("POST", "/" + followIndex + "/_ccr/pause_follow"))); } + protected static void putAutoFollowPattern(String patternName, String remoteCluster, String indexPattern) throws IOException { + Request putPatternRequest = new Request("PUT", "/_ccr/auto_follow/" + patternName); + putPatternRequest.setJsonEntity("{\"leader_index_patterns\": [\"" + indexPattern + "\"], \"remote_cluster\": \"" + + remoteCluster + "\"}"); + assertOK(client().performRequest(putPatternRequest)); + } + + protected static void deleteAutoFollowPattern(String patternName) throws IOException { + Request putPatternRequest = new Request("DELETE", "/_ccr/auto_follow/" + patternName); + assertOK(client().performRequest(putPatternRequest)); + } + + protected static void unfollow(String followIndex) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); + } + protected static void 
verifyDocuments(final String index, final int expectedNumDocs, final String query) throws IOException { verifyDocuments(index, expectedNumDocs, query, adminClient()); } @@ -186,6 +202,7 @@ protected static Map toMap(String response) { protected static void ensureYellow(String index) throws IOException { Request request = new Request("GET", "/_cluster/health/" + index); request.addParameter("wait_for_status", "yellow"); + request.addParameter("wait_for_active_shards", "1"); request.addParameter("wait_for_no_relocating_shards", "true"); request.addParameter("wait_for_no_initializing_shards", "true"); request.addParameter("timeout", "70s"); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 1345faaa5bcb0..a7fa69e7abd39 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -81,6 +82,7 @@ import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; @@ -126,6 +128,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final SetOnce restoreSourceService = 
new SetOnce<>(); private final SetOnce ccrSettings = new SetOnce<>(); private Client client; + private final boolean transportClientMode; /** * Construct an instance of the CCR container with the specified settings. @@ -147,6 +150,7 @@ public Ccr(final Settings settings) { this.settings = settings; this.enabled = CCR_ENABLED_SETTING.get(settings); this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + this.transportClientMode = XPackPlugin.transportClientMode(settings); } @Override @@ -314,6 +318,15 @@ public void onIndexModule(IndexModule indexModule) { } } + @Override + public Collection createGuiceModules() { + if (transportClientMode) { + return Collections.emptyList(); + } + + return Collections.singleton(b -> XPackPlugin.bindFeatureSet(b, CCRFeatureSet.class)); + } + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java index d6262a7711dad..625429dc0abc6 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -57,6 +57,13 @@ public final class CcrSettings { Setting.timeSetting("ccr.indices.recovery.recovery_activity_timeout", TimeValue.timeValueSeconds(60), Setting.Property.Dynamic, Setting.Property.NodeScope); + /** + * The timeout value to use for requests made as part of ccr recovery process. + * */ + public static final Setting INDICES_RECOVERY_ACTION_TIMEOUT_SETTING = + Setting.positiveTimeSetting("ccr.indices.recovery.internal_action_timeout", TimeValue.timeValueSeconds(60), + Property.Dynamic, Property.NodeScope); + /** * The settings defined by CCR. 
* @@ -67,6 +74,7 @@ static List> getSettings() { XPackSettings.CCR_ENABLED_SETTING, CCR_FOLLOWING_INDEX_SETTING, RECOVERY_MAX_BYTES_PER_SECOND, + INDICES_RECOVERY_ACTION_TIMEOUT_SETTING, INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, CCR_AUTO_FOLLOW_WAIT_FOR_METADATA_TIMEOUT, CCR_WAIT_FOR_METADATA_TIMEOUT); @@ -74,12 +82,15 @@ static List> getSettings() { private final CombinedRateLimiter ccrRateLimiter; private volatile TimeValue recoveryActivityTimeout; + private volatile TimeValue recoveryActionTimeout; public CcrSettings(Settings settings, ClusterSettings clusterSettings) { this.recoveryActivityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings); + this.recoveryActionTimeout = INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.get(settings); this.ccrRateLimiter = new CombinedRateLimiter(RECOVERY_MAX_BYTES_PER_SECOND.get(settings)); clusterSettings.addSettingsUpdateConsumer(RECOVERY_MAX_BYTES_PER_SECOND, this::setMaxBytesPerSec); clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setRecoveryActivityTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTION_TIMEOUT_SETTING, this::setRecoveryActionTimeout); } private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { @@ -90,6 +101,10 @@ private void setRecoveryActivityTimeout(TimeValue recoveryActivityTimeout) { this.recoveryActivityTimeout = recoveryActivityTimeout; } + private void setRecoveryActionTimeout(TimeValue recoveryActionTimeout) { + this.recoveryActionTimeout = recoveryActionTimeout; + } + public CombinedRateLimiter getRateLimiter() { return ccrRateLimiter; } @@ -97,4 +112,8 @@ public CombinedRateLimiter getRateLimiter() { public TimeValue getRecoveryActivityTimeout() { return recoveryActivityTimeout; } + + public TimeValue getRecoveryActionTimeout() { + return recoveryActionTimeout; + } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index bca8608904a76..27f3b60fb5291 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -6,52 +6,53 @@ package org.elasticsearch.xpack.ccr.action; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.ResourceAlreadyExistsException; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreClusterStateListener; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.ActiveShardsObserver; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.RoutingTable; -import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import 
org.elasticsearch.common.UUIDs; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.snapshots.RestoreInfo; +import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrLicenseChecker; import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import java.util.Objects; public final class TransportPutFollowAction - extends TransportMasterNodeAction { + extends TransportMasterNodeAction { + + private static final Logger logger = LogManager.getLogger(TransportPutFollowAction.class); private final Client client; - private final AllocationService allocationService; - private final ActiveShardsObserver activeShardsObserver; + private final RestoreService restoreService; private final CcrLicenseChecker ccrLicenseChecker; + private final ActiveShardsObserver activeShardsObserver; @Inject public TransportPutFollowAction( @@ -61,7 +62,7 @@ public TransportPutFollowAction( final ActionFilters actionFilters, final IndexNameExpressionResolver indexNameExpressionResolver, final Client client, - final AllocationService allocationService, + final RestoreService restoreService, final CcrLicenseChecker ccrLicenseChecker) { super( PutFollowAction.NAME, @@ -72,9 +73,9 @@ public TransportPutFollowAction( PutFollowAction.Request::new, indexNameExpressionResolver); this.client = 
client; - this.allocationService = allocationService; - this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); + this.restoreService = restoreService; this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); } @Override @@ -96,7 +97,7 @@ protected PutFollowAction.Response read(StreamInput in) throws IOException { protected void masterOperation( final PutFollowAction.Request request, final ClusterState state, - final ActionListener listener) throws Exception { + final ActionListener listener) { if (ccrLicenseChecker.isCcrAllowed() == false) { listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; @@ -111,12 +112,11 @@ protected void masterOperation( remoteCluster, leaderIndex, listener::onFailure, - (historyUUID, leaderIndexMetaData) -> createFollowerIndex(leaderIndexMetaData, historyUUID, request, listener)); + (historyUUID, leaderIndexMetaData) -> createFollowerIndex(leaderIndexMetaData, request, listener)); } private void createFollowerIndex( final IndexMetaData leaderIndexMetaData, - final String[] historyUUIDs, final PutFollowAction.Request request, final ActionListener listener) { if (leaderIndexMetaData == null) { @@ -131,98 +131,107 @@ private void createFollowerIndex( return; } - ActionListener handler = ActionListener.wrap( - result -> { - if (result) { - initiateFollowing(request, listener); - } else { - listener.onResponse(new PutFollowAction.Response(true, false, false)); - } - }, - listener::onFailure); - // Can't use create index api here, because then index templates can alter the mappings / settings. - // And index templates could introduce settings / mappings that are incompatible with the leader index. 
- clusterService.submitStateUpdateTask("create_following_index", new AckedClusterStateUpdateTask(request, handler) { + final Settings.Builder settingsBuilder = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getFollowRequest().getFollowerIndex()) + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + request.getRemoteCluster(); + final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST) + .indices(request.getLeaderIndex()).indicesOptions(request.indicesOptions()).renamePattern("^(.*)$") + .renameReplacement(request.getFollowRequest().getFollowerIndex()).masterNodeTimeout(request.masterNodeTimeout()) + .indexSettings(settingsBuilder); + + final Client clientWithHeaders = CcrLicenseChecker.wrapClient(this.client, threadPool.getThreadContext().getHeaders()); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { @Override - protected Boolean newResponse(final boolean acknowledged) { - return acknowledged; + public void onFailure(Exception e) { + listener.onFailure(e); } @Override - public ClusterState execute(final ClusterState currentState) throws Exception { - String followIndex = request.getFollowRequest().getFollowerIndex(); - IndexMetaData currentIndex = currentState.metaData().index(followIndex); - if (currentIndex != null) { - throw new ResourceAlreadyExistsException(currentIndex.getIndex()); - } + protected void doRun() throws Exception { + restoreService.restoreSnapshot(restoreRequest, new ActionListener() { - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); - - // Adding the leader index uuid for each shard as custom metadata: - Map metadata = new HashMap<>(); - metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", historyUUIDs)); - 
metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, leaderIndexMetaData.getIndexUUID()); - metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY, leaderIndexMetaData.getIndex().getName()); - metadata.put(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY, request.getRemoteCluster()); - imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, metadata); - - // Copy all settings, but overwrite a few settings. - Settings.Builder settingsBuilder = Settings.builder(); - settingsBuilder.put(leaderIndexMetaData.getSettings()); - // Overwriting UUID here, because otherwise we can't follow indices in the same cluster - settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); - settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - settingsBuilder.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); - imdBuilder.settings(settingsBuilder); - - // Copy mappings from leader IMD to follow IMD - for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { - imdBuilder.putMapping(cursor.value); - } - imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); - IndexMetaData followIMD = imdBuilder.build(); - mdBuilder.put(followIMD, false); + @Override + public void onResponse(RestoreService.RestoreCompletionResponse response) { + afterRestoreStarted(clientWithHeaders, request, listener, response); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + }); + } - ClusterState.Builder builder = ClusterState.builder(currentState); - builder.metaData(mdBuilder.build()); - ClusterState updatedState = builder.build(); + private void afterRestoreStarted(Client clientWithHeaders, PutFollowAction.Request request, + ActionListener originalListener, + RestoreService.RestoreCompletionResponse response) { + final ActionListener listener; + if 
(ActiveShardCount.NONE.equals(request.waitForActiveShards())) { + originalListener.onResponse(new PutFollowAction.Response(true, false, false)); + listener = new ActionListener() { + + @Override + public void onResponse(PutFollowAction.Response response) { + logger.debug("put follow {} completed with {}", request, response); + } - RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) - .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); - updatedState = allocationService.reroute( - ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), - "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); + @Override + public void onFailure(Exception e) { + logger.debug(() -> new ParameterizedMessage("put follow {} failed during the restore process", request), e); + } + }; + } else { + listener = originalListener; + } - logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", - followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); + RestoreClusterStateListener.createAndRegisterListener(clusterService, response, new ActionListener() { + @Override + public void onResponse(RestoreSnapshotResponse restoreSnapshotResponse) { + RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); + + if (restoreInfo == null) { + // If restoreInfo is null then it is possible there was a master failure during the + // restore. 
+ listener.onResponse(new PutFollowAction.Response(true, false, false)); + } else if (restoreInfo.failedShards() == 0) { + initiateFollowing(clientWithHeaders, request, listener); + } else { + assert restoreInfo.failedShards() > 0 : "Should have failed shards"; + listener.onResponse(new PutFollowAction.Response(true, false, false)); + } + } - return updatedState; + @Override + public void onFailure(Exception e) { + listener.onFailure(e); } }); } private void initiateFollowing( - final PutFollowAction.Request request, - final ActionListener listener) { + final Client client, + final PutFollowAction.Request request, + final ActionListener listener) { + assert request.waitForActiveShards() != ActiveShardCount.DEFAULT : "PutFollowAction does not support DEFAULT."; activeShardsObserver.waitForActiveShards(new String[]{request.getFollowRequest().getFollowerIndex()}, - ActiveShardCount.DEFAULT, request.timeout(), result -> { - if (result) { - client.execute(ResumeFollowAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( - r -> listener.onResponse(new PutFollowAction.Response(true, true, r.isAcknowledged())), - listener::onFailure - )); - } else { - listener.onResponse(new PutFollowAction.Response(true, false, false)); - } - }, listener::onFailure); + request.waitForActiveShards(), request.timeout(), result -> { + if (result) { + client.execute(ResumeFollowAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( + r -> listener.onResponse(new PutFollowAction.Response(true, true, r.isAcknowledged())), + listener::onFailure + )); + } else { + listener.onResponse(new PutFollowAction.Response(true, false, false)); + } + }, listener::onFailure); } @Override protected ClusterBlockException checkBlock(final PutFollowAction.Request request, final ClusterState state) { return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); } - } diff --git 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 33a8c64c96138..bcf0e5f6dc6e9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.ccr.repository; +import com.carrotsearch.hppc.cursors.IntObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.lucene.index.IndexCommit; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -81,6 +83,7 @@ public class CcrRepository extends AbstractLifecycleComponent implements Reposit public static final String TYPE = "_ccr_"; public static final String NAME_PREFIX = "_ccr_"; private static final SnapshotId SNAPSHOT_ID = new SnapshotId(LATEST, LATEST); + private static final String IN_SYNC_ALLOCATION_ID = "ccr_restore"; private final RepositoryMetaData metadata; private final CcrSettings ccrSettings; @@ -124,7 +127,8 @@ public RepositoryMetaData getMetadata() { public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { assert SNAPSHOT_ID.equals(snapshotId) : "RemoteClusterRepository only supports " + SNAPSHOT_ID + " as the SnapshotId"; Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true).setNodes(true).get(); + ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true).setNodes(true) + .get(ccrSettings.getRecoveryActionTimeout()); ImmutableOpenMap indicesMap = response.getState().metaData().indices(); ArrayList indices = new ArrayList<>(indicesMap.size()); indicesMap.keysIt().forEachRemaining(indices::add); @@ -138,7 +142,8 @@ public MetaData 
getSnapshotGlobalMetaData(SnapshotId snapshotId) { Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); // We set a single dummy index name to avoid fetching all the index data ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest("dummy_index_name"); - ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) + .actionGet(ccrSettings.getRecoveryActionTimeout()); return clusterState.getState().metaData(); } @@ -149,15 +154,16 @@ public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex); - ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) + .actionGet(ccrSettings.getRecoveryActionTimeout()); // Validates whether the leader cluster has been configured properly: PlainActionFuture future = PlainActionFuture.newFuture(); IndexMetaData leaderIndexMetaData = clusterState.getState().metaData().index(leaderIndex); ccrLicenseChecker.fetchLeaderHistoryUUIDs(remoteClient, leaderIndexMetaData, future::onFailure, future::onResponse); - String[] leaderHistoryUUIDs = future.actionGet(); + String[] leaderHistoryUUIDs = future.actionGet(ccrSettings.getRecoveryActionTimeout()); - IndexMetaData.Builder imdBuilder = IndexMetaData.builder(leaderIndexMetaData); + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(leaderIndex); // Adding the leader index uuid for each shard as custom metadata: Map metadata = new HashMap<>(); metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", leaderHistoryUUIDs)); @@ -166,13 +172,27 @@ public IndexMetaData 
getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId ind metadata.put(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY, remoteClusterAlias); imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, metadata); + imdBuilder.settings(leaderIndexMetaData.getSettings()); + + // Copy mappings from leader IMD to follow IMD + for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { + imdBuilder.putMapping(cursor.value); + } + + imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); + // We assert that insync allocation ids are not empty in `PrimaryShardAllocator` + for (IntObjectCursor> entry : leaderIndexMetaData.getInSyncAllocationIds()) { + imdBuilder.putInSyncAllocationIds(entry.key, Collections.singleton(IN_SYNC_ALLOCATION_ID)); + } + return imdBuilder.build(); } @Override public RepositoryData getRepositoryData() { Client remoteClient = client.getRemoteClusterClient(remoteClusterAlias); - ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true).get(); + ClusterStateResponse response = remoteClient.admin().cluster().prepareState().clear().setMetaData(true) + .get(ccrSettings.getRecoveryActionTimeout()); MetaData remoteMetaData = response.getState().getMetaData(); Map copiedSnapshotIds = new HashMap<>(); @@ -282,7 +302,8 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Ve private void maybeUpdateMappings(Client localClient, Client remoteClient, Index leaderIndex, IndexSettings followerIndexSettings) { ClusterStateRequest clusterStateRequest = CcrRequests.metaDataRequest(leaderIndex.getName()); - ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest).actionGet(); + ClusterStateResponse clusterState = remoteClient.admin().cluster().state(clusterStateRequest) + .actionGet(ccrSettings.getRecoveryActionTimeout()); IndexMetaData leaderIndexMetadata = clusterState.getState().metaData().getIndexSafe(leaderIndex); long 
leaderMappingVersion = leaderIndexMetadata.getMappingVersion(); @@ -290,7 +311,7 @@ private void maybeUpdateMappings(Client localClient, Client remoteClient, Index Index followerIndex = followerIndexSettings.getIndex(); MappingMetaData mappingMetaData = leaderIndexMetadata.mapping(); PutMappingRequest putMappingRequest = CcrRequests.putMappingRequest(followerIndex.getName(), mappingMetaData); - localClient.admin().indices().putMapping(putMappingRequest).actionGet(); + localClient.admin().indices().putMapping(putMappingRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); } } @@ -298,9 +319,9 @@ private RestoreSession openSession(String repositoryName, Client remoteClient, S RecoveryState recoveryState) { String sessionUUID = UUIDs.randomBase64UUID(); PutCcrRestoreSessionAction.PutCcrRestoreSessionResponse response = remoteClient.execute(PutCcrRestoreSessionAction.INSTANCE, - new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(); + new PutCcrRestoreSessionRequest(sessionUUID, leaderShardId)).actionGet(ccrSettings.getRecoveryActionTimeout()); return new RestoreSession(repositoryName, remoteClient, sessionUUID, response.getNode(), indexShard, recoveryState, - response.getStoreFileMetaData(), ccrSettings.getRateLimiter(), throttledTime::inc); + response.getStoreFileMetaData(), ccrSettings, throttledTime::inc); } private static class RestoreSession extends FileRestoreContext implements Closeable { @@ -311,18 +332,18 @@ private static class RestoreSession extends FileRestoreContext implements Closea private final String sessionUUID; private final DiscoveryNode node; private final Store.MetadataSnapshot sourceMetaData; - private final CombinedRateLimiter rateLimiter; + private final CcrSettings ccrSettings; private final LongConsumer throttleListener; RestoreSession(String repositoryName, Client remoteClient, String sessionUUID, DiscoveryNode node, IndexShard indexShard, - RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, 
CombinedRateLimiter rateLimiter, + RecoveryState recoveryState, Store.MetadataSnapshot sourceMetaData, CcrSettings ccrSettings, LongConsumer throttleListener) { super(repositoryName, indexShard, SNAPSHOT_ID, recoveryState, BUFFER_SIZE); this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; this.sourceMetaData = sourceMetaData; - this.rateLimiter = rateLimiter; + this.ccrSettings = ccrSettings; this.throttleListener = throttleListener; } @@ -338,14 +359,14 @@ void restoreFiles() throws IOException { @Override protected InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo) { - return new RestoreFileInputStream(remoteClient, sessionUUID, node, fileInfo.metadata(), rateLimiter, throttleListener); + return new RestoreFileInputStream(remoteClient, sessionUUID, node, fileInfo.metadata(), ccrSettings, throttleListener); } @Override public void close() { ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(sessionUUID, node); ClearCcrRestoreSessionAction.ClearCcrRestoreSessionResponse response = - remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(); + remoteClient.execute(ClearCcrRestoreSessionAction.INSTANCE, clearRequest).actionGet(ccrSettings.getRecoveryActionTimeout()); } } @@ -356,17 +377,19 @@ private static class RestoreFileInputStream extends InputStream { private final DiscoveryNode node; private final StoreFileMetaData fileToRecover; private final CombinedRateLimiter rateLimiter; + private final CcrSettings ccrSettings; private final LongConsumer throttleListener; private long pos = 0; private RestoreFileInputStream(Client remoteClient, String sessionUUID, DiscoveryNode node, StoreFileMetaData fileToRecover, - CombinedRateLimiter rateLimiter, LongConsumer throttleListener) { + CcrSettings ccrSettings, LongConsumer throttleListener) { this.remoteClient = remoteClient; this.sessionUUID = sessionUUID; this.node = node; this.fileToRecover = fileToRecover; - 
this.rateLimiter = rateLimiter; + this.ccrSettings = ccrSettings; + this.rateLimiter = ccrSettings.getRateLimiter(); this.throttleListener = throttleListener; } @@ -391,7 +414,7 @@ public int read(byte[] bytes, int off, int len) throws IOException { String fileName = fileToRecover.name(); GetCcrRestoreFileChunkRequest request = new GetCcrRestoreFileChunkRequest(node, sessionUUID, fileName, bytesRequested); GetCcrRestoreFileChunkAction.GetCcrRestoreFileChunkResponse response = - remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request).actionGet(); + remoteClient.execute(GetCcrRestoreFileChunkAction.INSTANCE, request).actionGet(ccrSettings.getRecoveryActionTimeout()); BytesReference fileChunk = response.getChunk(); int bytesReceived = fileChunk.length(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java index 7b21422cb9867..d7a2edd21d26f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestPutFollowAction.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ccr.rest; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; @@ -38,7 +39,8 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient static Request createRequest(RestRequest restRequest) throws IOException { try (XContentParser parser = restRequest.contentOrSourceParamParser()) { - return Request.fromXContent(parser, restRequest.param("index")); + ActiveShardCount waitForActiveShards = ActiveShardCount.parseString(restRequest.param("wait_for_active_shards")); + return Request.fromXContent(parser, restRequest.param("index"), waitForActiveShards); 
} } } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 65fd80325e716..c4fdeb116ae86 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.analysis.common.CommonAnalysisPlugin; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; @@ -64,7 +65,9 @@ import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.TestCluster; import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.nio.MockNioTransportPlugin; import org.elasticsearch.xpack.ccr.CcrSettings; import org.elasticsearch.xpack.ccr.LocalStateCcr; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngine; @@ -119,7 +122,8 @@ public final void startClusters() throws Exception { stopClusters(); Collection> mockPlugins = Arrays.asList(ESIntegTestCase.TestSeedPlugin.class, - TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class, getTestTransportPlugin()); + TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class, MockTransportService.TestPlugin.class, + MockNioTransportPlugin.class); InternalTestCluster leaderCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(), numberOfNodesPerCluster(), UUIDs.randomBase64UUID(random()), createNodeConfigurationSource(null), 0, "leader", mockPlugins, @@ -277,8 +281,13 @@ protected 
final ClusterHealthStatus ensureLeaderGreen(String... indices) { } protected final ClusterHealthStatus ensureFollowerGreen(String... indices) { + return ensureFollowerGreen(false, indices); + } + + protected final ClusterHealthStatus ensureFollowerGreen(boolean waitForNoInitializingShards, String... indices) { logger.info("ensure green follower indices {}", Arrays.toString(indices)); - return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(30), false, indices); + return ensureColor(clusterGroup.followerCluster, ClusterHealthStatus.GREEN, TimeValue.timeValueSeconds(30), + waitForNoInitializingShards, indices); } private ClusterHealthStatus ensureColor(TestCluster testCluster, @@ -411,10 +420,15 @@ protected String getIndexSettings(final int numberOfShards, final int numberOfRe } public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex) { + return putFollow(leaderIndex, followerIndex, ActiveShardCount.ONE); + } + + public static PutFollowAction.Request putFollow(String leaderIndex, String followerIndex, ActiveShardCount waitForActiveShards) { PutFollowAction.Request request = new PutFollowAction.Request(); request.setRemoteCluster("leader_cluster"); request.setLeaderIndex(leaderIndex); request.setFollowRequest(resumeFollow(followerIndex)); + request.waitForActiveShards(waitForActiveShards); return request; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index ad8f545fa9dc0..48531c7d28f9a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -98,6 +99,7 @@ protected PutFollowAction.Request getPutFollowRequest(String leaderIndex, String request.setRemoteCluster("local"); request.setLeaderIndex(leaderIndex); request.setFollowRequest(getResumeFollowRequest(followerIndex)); + request.waitForActiveShards(ActiveShardCount.ONE); return request; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetTests.java new file mode 100644 index 0000000000000..b95e8fc7c4008 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetTests.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; +import org.junit.Before; +import org.mockito.Mockito; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CCRFeatureSetTests extends ESTestCase { + + private XPackLicenseState licenseState; + private ClusterService clusterService; + + @Before + public void init() throws Exception { + licenseState = mock(XPackLicenseState.class); + clusterService = mock(ClusterService.class); + } + + public void testAvailable() { + CCRFeatureSet featureSet = new CCRFeatureSet(Settings.EMPTY, licenseState, clusterService); + + when(licenseState.isCcrAllowed()).thenReturn(false); + assertThat(featureSet.available(), equalTo(false)); + + when(licenseState.isCcrAllowed()).thenReturn(true); + assertThat(featureSet.available(), equalTo(true)); + + featureSet = new CCRFeatureSet(Settings.EMPTY, null, clusterService); + assertThat(featureSet.available(), equalTo(false)); + } + + public void testEnabled() { + Settings.Builder settings = Settings.builder().put("xpack.ccr.enabled", false); + CCRFeatureSet featureSet = new 
CCRFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(false)); + + settings = Settings.builder().put("xpack.ccr.enabled", true); + featureSet = new CCRFeatureSet(settings.build(), licenseState, clusterService); + assertThat(featureSet.enabled(), equalTo(true)); + } + + public void testName() { + CCRFeatureSet featureSet = new CCRFeatureSet(Settings.EMPTY, licenseState, clusterService); + assertThat(featureSet.name(), equalTo("ccr")); + } + + public void testNativeCodeInfo() { + CCRFeatureSet featureSet = new CCRFeatureSet (Settings.EMPTY, licenseState, clusterService); + assertNull(featureSet.nativeCodeInfo()); + } + + public void testUsageStats() throws Exception { + MetaData.Builder metaData = MetaData.builder(); + + int numFollowerIndices = randomIntBetween(0, 32); + for (int i = 0; i < numFollowerIndices; i++) { + IndexMetaData.Builder followerIndex = IndexMetaData.builder("follow_index" + i) + .settings(settings(Version.CURRENT).put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(i) + .putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, new HashMap<>()); + metaData.put(followerIndex); + } + + // Add a regular index, to check that we do not take that one into account: + IndexMetaData.Builder regularIndex = IndexMetaData.builder("my_index") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(numFollowerIndices); + metaData.put(regularIndex); + + int numAutoFollowPatterns = randomIntBetween(0, 32); + Map patterns = new HashMap<>(numAutoFollowPatterns); + for (int i = 0; i < numAutoFollowPatterns; i++) { + AutoFollowMetadata.AutoFollowPattern pattern = new AutoFollowMetadata.AutoFollowPattern("remote_cluser", + Collections.singletonList("logs" + i + "*"), null, null, null, null, null, null, null, null, null, null, null); + patterns.put("pattern" + i, pattern); + } + metaData.putCustom(AutoFollowMetadata.TYPE, 
new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metaData(metaData).build(); + Mockito.when(clusterService.state()).thenReturn(clusterState); + + PlainActionFuture future = new PlainActionFuture<>(); + CCRFeatureSet ccrFeatureSet = new CCRFeatureSet(Settings.EMPTY, licenseState, clusterService); + ccrFeatureSet.usage(future); + CCRFeatureSet.Usage ccrUsage = (CCRFeatureSet.Usage) future.get(); + assertThat(ccrUsage.enabled(), equalTo(ccrFeatureSet.enabled())); + assertThat(ccrUsage.available(), equalTo(ccrFeatureSet.available())); + + assertThat(ccrUsage.getNumberOfFollowerIndices(), equalTo(numFollowerIndices)); + assertThat(ccrUsage.getLastFollowTimeInMillis(), greaterThanOrEqualTo(0L)); + assertThat(ccrUsage.getNumberOfAutoFollowPatterns(), equalTo(numAutoFollowPatterns)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetUsageTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetUsageTests.java new file mode 100644 index 0000000000000..69e41ffddab43 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRFeatureSetUsageTests.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; + +public class CCRFeatureSetUsageTests extends AbstractWireSerializingTestCase { + + @Override + protected CCRFeatureSet.Usage createTestInstance() { + return new CCRFeatureSet.Usage(randomBoolean(), randomBoolean(), randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), randomNonNegativeLong()); + } + + @Override + protected Writeable.Reader instanceReader() { + return CCRFeatureSet.Usage::new; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index ee02241978cfe..f22857939e0d1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; @@ -34,8 +36,10 @@ import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.ccr.action.repositories.GetCcrRestoreFileChunkAction; import org.elasticsearch.xpack.ccr.repository.CcrRepository; import org.elasticsearch.xpack.ccr.repository.CcrRestoreSourceService; @@ -51,6 +55,7 @@ import 
static org.elasticsearch.snapshots.RestoreService.restoreInProgress; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; // TODO: Fold this integration test into a more expansive integration test as more bootstrap from remote work // TODO: is completed. @@ -308,6 +313,78 @@ public void testRateLimitingIsEmployed() throws Exception { } } + public void testIndividualActionsTimeout() throws Exception { + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); + TimeValue timeValue = TimeValue.timeValueMillis(100); + settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), timeValue)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + + String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; + String leaderIndex = "index1"; + String followerIndex = "index2"; + + final int numberOfPrimaryShards = randomIntBetween(1, 3); + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate(leaderIndex).setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderGreen(leaderIndex); + + List transportServices = new ArrayList<>(); + + for (TransportService transportService : getFollowerCluster().getDataOrMasterNodeInstances(TransportService.class)) { + MockTransportService mockTransportService = (MockTransportService) transportService; + transportServices.add(mockTransportService); + mockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.equals(GetCcrRestoreFileChunkAction.NAME) == false) { + connection.sendRequest(requestId, action, request, options); + } + }); + } + + 
logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + leaderClient().admin().indices().prepareFlush(leaderIndex).setForce(true).setWaitIfOngoing(true).get(); + + Settings.Builder settingsBuilder = Settings.builder() + .put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followerIndex) + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST) + .indices(leaderIndex).indicesOptions(indicesOptions).renamePattern("^(.*)$") + .renameReplacement(followerIndex).masterNodeTimeout(new TimeValue(1L, TimeUnit.HOURS)) + .indexSettings(settingsBuilder); + + final RestoreService restoreService = getFollowerCluster().getCurrentMasterNodeInstance(RestoreService.class); + final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); + PlainActionFuture future = PlainActionFuture.newFuture(); + restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); + + // Depending on when the timeout occurs this can fail in two ways. If it times-out when fetching + // metadata this will throw an exception. If it times-out when restoring a shard, the shard will + // be marked as failed. Either one is a success for the purpose of this test. 
+ try { + RestoreInfo restoreInfo = future.actionGet(); + assertEquals(0, restoreInfo.successfulShards()); + assertEquals(numberOfPrimaryShards, restoreInfo.failedShards()); + } catch (Exception e) { + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchTimeoutException.class)); + } + + + for (MockTransportService transportService : transportServices) { + transportService.clearAllRules(); + } + + settingsRequest = new ClusterUpdateSettingsRequest(); + TimeValue defaultValue = CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getDefault(Settings.EMPTY); + settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING.getKey(), + defaultValue)); + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37887") public void testFollowerMappingIsUpdated() throws IOException { String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 648c295efa817..55fcb6ace89fd 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -30,7 +31,10 @@ import org.elasticsearch.action.index.IndexRequest; import 
org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterIndexHealth; +import org.elasticsearch.cluster.health.ClusterShardHealth; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; @@ -91,14 +95,12 @@ public class IndexFollowingIT extends CcrIntegTestCase { public void testFollowIndex() throws Exception { final int numberOfPrimaryShards = randomIntBetween(1, 3); - final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), + int numberOfReplicas = between(0, 1); + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, numberOfReplicas, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); ensureLeaderYellow("index1"); - final PutFollowAction.Request followRequest = putFollow("index1", "index2"); - followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); - final int firstBatchNumDocs = randomIntBetween(2, 64); logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs); for (int i = 0; i < firstBatchNumDocs; i++) { @@ -106,6 +108,30 @@ public void testFollowIndex() throws Exception { leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); } + boolean waitOnAll = randomBoolean(); + + final PutFollowAction.Request followRequest; + if (waitOnAll) { + followRequest = putFollow("index1", "index2", ActiveShardCount.ALL); + } else { + followRequest = putFollow("index1", "index2", ActiveShardCount.ONE); + } + PutFollowAction.Response response = followerClient().execute(PutFollowAction.INSTANCE, 
followRequest).get(); + assertTrue(response.isFollowIndexCreated()); + assertTrue(response.isFollowIndexShardsAcked()); + assertTrue(response.isIndexFollowingStarted()); + + ClusterHealthRequest healthRequest = Requests.clusterHealthRequest("index2").waitForNoRelocatingShards(true); + ClusterIndexHealth indexHealth = followerClient().admin().cluster().health(healthRequest).actionGet().getIndices().get("index2"); + for (ClusterShardHealth shardHealth : indexHealth.getShards().values()) { + if (waitOnAll) { + assertTrue(shardHealth.isPrimaryActive()); + assertEquals(1 + numberOfReplicas, shardHealth.getActiveShards()); + } else { + assertTrue(shardHealth.isPrimaryActive()); + } + } + final Map firstBatchNumDocsPerShard = new HashMap<>(); final ShardStats[] firstBatchShardStats = leaderClient().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); @@ -152,6 +178,119 @@ public void testFollowIndex() throws Exception { assertMaxSeqNoOfUpdatesIsTransferred(resolveLeaderIndex("index1"), resolveFollowerIndex("index2"), numberOfPrimaryShards); } + public void testFollowIndexWithConcurrentMappingChanges() throws Exception { + final int numberOfPrimaryShards = randomIntBetween(1, 3); + final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderYellow("index1"); + + final int firstBatchNumDocs = randomIntBetween(2, 64); + logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs); + for (int i = 0; i < firstBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + AtomicBoolean isRunning = new AtomicBoolean(true); + + // Concurrently index new docs with 
mapping changes + Thread thread = new Thread(() -> { + int docID = 10000; + char[] chars = "abcdeghijklmnopqrstuvwxyz".toCharArray(); + for (char c : chars) { + if (isRunning.get() == false) { + break; + } + final String source; + long valueToPutInDoc = randomLongBetween(0, 50000); + if (randomBoolean()) { + source = String.format(Locale.ROOT, "{\"%c\":%d}", c, valueToPutInDoc); + } else { + source = String.format(Locale.ROOT, "{\"%c\":\"%d\"}", c, valueToPutInDoc); + } + for (int i = 1; i < 10; i++) { + if (isRunning.get() == false) { + break; + } + leaderClient().prepareIndex("index1", "doc", Long.toString(docID++)).setSource(source, XContentType.JSON).get(); + if (rarely()) { + leaderClient().admin().indices().prepareFlush("index1").setForce(true).get(); + } + } + leaderClient().admin().indices().prepareFlush("index1").setForce(true).setWaitIfOngoing(true).get(); + } + }); + thread.start(); + + final PutFollowAction.Request followRequest = putFollow("index1", "index2", ActiveShardCount.NONE); + followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + ensureFollowerGreen("index2"); + + for (int i = 0; i < firstBatchNumDocs; i++) { + assertBusy(assertExpectedDocumentRunnable(i)); + } + + final int secondBatchNumDocs = randomIntBetween(2, 64); + logger.info("Indexing [{}] docs as second batch", secondBatchNumDocs); + for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) { + assertBusy(assertExpectedDocumentRunnable(i)); + } + + isRunning.set(false); + thread.join(); + } + + public void testFollowIndexWithoutWaitForComplete() throws Exception { + final int numberOfPrimaryShards = randomIntBetween(1, 3); + final String leaderIndexSettings = 
getIndexSettings(numberOfPrimaryShards, between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureLeaderYellow("index1"); + + final int firstBatchNumDocs = randomIntBetween(2, 64); + logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs); + for (int i = 0; i < firstBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + leaderClient().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + final PutFollowAction.Request followRequest = putFollow("index1", "index2", ActiveShardCount.NONE); + PutFollowAction.Response response = followerClient().execute(PutFollowAction.INSTANCE, followRequest).get(); + + assertTrue(response.isFollowIndexCreated()); + assertFalse(response.isFollowIndexShardsAcked()); + assertFalse(response.isIndexFollowingStarted()); + + // Check that the index exists, would throw index not found exception if the index is missing + followerClient().admin().indices().prepareGetIndex().addIndices("index2").get(); + ensureFollowerGreen(true, "index2"); + + final Map firstBatchNumDocsPerShard = new HashMap<>(); + final ShardStats[] firstBatchShardStats = + leaderClient().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); + for (final ShardStats shardStats : firstBatchShardStats) { + if (shardStats.getShardRouting().primary()) { + long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1; + firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value); + } + } + + assertBusy(assertTask(numberOfPrimaryShards, firstBatchNumDocsPerShard)); + + for (int i = 0; i < firstBatchNumDocs; i++) { + assertBusy(assertExpectedDocumentRunnable(i)); + } + assertTotalNumberOfOptimizedIndexing(resolveFollowerIndex("index2"), numberOfPrimaryShards, 
firstBatchNumDocs); + pauseFollow("index2"); + } + public void testSyncMappings() throws Exception { final String leaderIndexSettings = getIndexSettings(2, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); @@ -844,6 +983,7 @@ private CheckedRunnable assertTask(final int numberOfPrimaryShards, f return () -> { final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); final PersistentTasksCustomMetaData taskMetadata = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertNotNull(taskMetadata); ListTasksRequest listTasksRequest = new ListTasksRequest(); listTasksRequest.setDetailed(true); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java index 2c50411971a1b..f50f17c9e296d 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalIndexFollowingIT.java @@ -39,14 +39,14 @@ public void testFollowIndex() throws Exception { assertAcked(client().admin().indices().prepareCreate("leader").setSource(leaderIndexSettings, XContentType.JSON)); ensureGreen("leader"); - final PutFollowAction.Request followRequest = getPutFollowRequest("leader", "follower"); - client().execute(PutFollowAction.INSTANCE, followRequest).get(); - final long firstBatchNumDocs = randomIntBetween(2, 64); for (int i = 0; i < firstBatchNumDocs; i++) { client().prepareIndex("leader", "doc").setSource("{}", XContentType.JSON).get(); } + final PutFollowAction.Request followRequest = getPutFollowRequest("leader", "follower"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + assertBusy(() -> { assertThat(client().prepareSearch("follower").get().getHits().getTotalHits().value, equalTo(firstBatchNumDocs)); }); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java index 726a1c9893a50..d32a773ebe218 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/PutFollowActionRequestTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; @@ -30,7 +31,7 @@ protected PutFollowAction.Request createTestInstance() { @Override protected PutFollowAction.Request doParseInstance(XContentParser parser) throws IOException { - return PutFollowAction.Request.fromXContent(parser, null); + return PutFollowAction.Request.fromXContent(parser, null, ActiveShardCount.DEFAULT); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java index 5f352788d9597..3035b96b5bcac 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceServiceTests.java @@ -40,7 +40,7 @@ public void setUp() throws Exception { Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), "node").build(); taskQueue = new DeterministicTaskQueue(settings, random()); Set> registeredSettings = Sets.newHashSet(CcrSettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, - CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND); + CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND, 
CcrSettings.INDICES_RECOVERY_ACTION_TIMEOUT_SETTING); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, registeredSettings); restoreSourceService = new CcrRestoreSourceService(taskQueue.getThreadPool(), new CcrSettings(Settings.EMPTY, clusterSettings)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 1b6f128318a2a..4e3feb3c8a2fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; +import org.elasticsearch.xpack.core.ccr.CCRFeatureSet; import org.elasticsearch.xpack.core.deprecation.DeprecationInfoAction; import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage; import org.elasticsearch.xpack.core.graph.action.GraphExploreAction; @@ -412,6 +413,7 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(MetaData.Custom.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, AutoFollowMetadata.TYPE, in -> AutoFollowMetadata.readDiffFrom(MetaData.Custom.class, AutoFollowMetadata.TYPE, in)), + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.CCR, CCRFeatureSet.Usage::new), // ILM new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 0e6888dd80d73..0c763032e22ca 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -33,6 +33,8 @@ public final class XPackField { public static final String ROLLUP = "rollup"; /** Name constant for the index lifecycle feature. */ public static final String INDEX_LIFECYCLE = "ilm"; + /** Name constant for the CCR feature. */ + public static final String CCR = "ccr"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/CCRFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/CCRFeatureSet.java new file mode 100644 index 0000000000000..f6e9e76d448f7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/CCRFeatureSet.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ccr; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.XPackField; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.io.IOException; +import java.time.Instant; +import java.util.Map; +import java.util.Objects; + +public class CCRFeatureSet implements XPackFeatureSet { + + private final boolean enabled; + private final XPackLicenseState licenseState; + private final ClusterService clusterService; + + @Inject + public CCRFeatureSet(Settings settings, @Nullable XPackLicenseState licenseState, ClusterService clusterService) { + this.enabled = XPackSettings.CCR_ENABLED_SETTING.get(settings); + this.licenseState = licenseState; + this.clusterService = clusterService; + } + + @Override + public String name() { + return XPackField.CCR; + } + + @Override + public String description() { + return "Cross Cluster Replication"; + } + + @Override + public boolean available() { + return licenseState != null && licenseState.isCcrAllowed(); + } + + @Override + public boolean enabled() { + return enabled; + } + + @Override + public Map nativeCodeInfo() { + return null; + } + + @Override + public void usage(ActionListener listener) { + MetaData metaData = clusterService.state().metaData(); + + int numberOfFollowerIndices = 0; + long lastFollowerIndexCreationDate = 0L; + for (IndexMetaData imd : metaData) { + 
if (imd.getCustomData("ccr") != null) { + numberOfFollowerIndices++; + if (lastFollowerIndexCreationDate < imd.getCreationDate()) { + lastFollowerIndexCreationDate = imd.getCreationDate(); + } + } + } + AutoFollowMetadata autoFollowMetadata = metaData.custom(AutoFollowMetadata.TYPE); + int numberOfAutoFollowPatterns = autoFollowMetadata != null ? autoFollowMetadata.getPatterns().size() : 0; + + Long lastFollowTimeInMillis; + if (numberOfFollowerIndices == 0) { + // Otherwise we would return a value that makes no sense. + lastFollowTimeInMillis = null; + } else { + lastFollowTimeInMillis = Math.max(0, Instant.now().toEpochMilli() - lastFollowerIndexCreationDate); + } + + Usage usage = + new Usage(available(), enabled(), numberOfFollowerIndices, numberOfAutoFollowPatterns, lastFollowTimeInMillis); + listener.onResponse(usage); + } + + public static class Usage extends XPackFeatureSet.Usage { + + private final int numberOfFollowerIndices; + private final int numberOfAutoFollowPatterns; + private final Long lastFollowTimeInMillis; + + public Usage(boolean available, + boolean enabled, + int numberOfFollowerIndices, + int numberOfAutoFollowPatterns, + Long lastFollowTimeInMillis) { + super(XPackField.CCR, available, enabled); + this.numberOfFollowerIndices = numberOfFollowerIndices; + this.numberOfAutoFollowPatterns = numberOfAutoFollowPatterns; + this.lastFollowTimeInMillis = lastFollowTimeInMillis; + } + + public Usage(StreamInput in) throws IOException { + super(in); + numberOfFollowerIndices = in.readVInt(); + numberOfAutoFollowPatterns = in.readVInt(); + if (in.readBoolean()) { + lastFollowTimeInMillis = in.readVLong(); + } else { + lastFollowTimeInMillis = null; + } + } + + public int getNumberOfFollowerIndices() { + return numberOfFollowerIndices; + } + + public int getNumberOfAutoFollowPatterns() { + return numberOfAutoFollowPatterns; + } + + public Long getLastFollowTimeInMillis() { + return lastFollowTimeInMillis; + } + + @Override + public void 
writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(numberOfFollowerIndices); + out.writeVInt(numberOfAutoFollowPatterns); + if (lastFollowTimeInMillis != null) { + out.writeBoolean(true); + out.writeVLong(lastFollowTimeInMillis); + } else { + out.writeBoolean(false); + } + } + + @Override + protected void innerXContent(XContentBuilder builder, Params params) throws IOException { + super.innerXContent(builder, params); + builder.field("follower_indices_count", numberOfFollowerIndices); + builder.field("auto_follow_patterns_count", numberOfAutoFollowPatterns); + if (lastFollowTimeInMillis != null) { + builder.field("last_follow_time_in_millis", lastFollowTimeInMillis); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Usage usage = (Usage) o; + return numberOfFollowerIndices == usage.numberOfFollowerIndices && + numberOfAutoFollowPatterns == usage.numberOfAutoFollowPatterns && + Objects.equals(lastFollowTimeInMillis, usage.lastFollowTimeInMillis); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfFollowerIndices, numberOfAutoFollowPatterns, lastFollowTimeInMillis); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 47240a6a1da10..6ca91bd7f0dad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -6,10 +6,12 @@ package org.elasticsearch.xpack.core.ccr.action; +import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; 
+import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.ParseField; @@ -28,10 +30,10 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.FOLLOWER_INDEX_FIELD; -import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_OPERATION_COUNT; -import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_SIZE; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_OUTSTANDING_READ_REQUESTS; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_OUTSTANDING_WRITE_REQUESTS; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_OPERATION_COUNT; +import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_READ_REQUEST_SIZE; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_RETRY_DELAY_FIELD; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_COUNT; import static org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction.Request.MAX_WRITE_BUFFER_SIZE; @@ -105,7 +107,8 @@ public static class Request extends AcknowledgedRequest implements Indi ObjectParser.ValueType.STRING); } - public static Request fromXContent(final XContentParser parser, final String followerIndex) throws IOException { + public static Request fromXContent(final XContentParser parser, final String followerIndex, ActiveShardCount waitForActiveShards) + throws IOException { Request request = PARSER.parse(parser, followerIndex); if (followerIndex != null) { if (request.getFollowRequest().getFollowerIndex() == null) { @@ -116,11 +119,13 @@ public static Request 
fromXContent(final XContentParser parser, final String fol } } } + request.waitForActiveShards(waitForActiveShards); return request; } private String remoteCluster; private String leaderIndex; + private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE; private ResumeFollowAction.Request followRequest; public Request() { @@ -142,6 +147,27 @@ public void setLeaderIndex(String leaderIndex) { this.leaderIndex = leaderIndex; } + public ActiveShardCount waitForActiveShards() { + return waitForActiveShards; + } + + /** + * Sets the number of shard copies that should be active for follower index creation to + * return. Defaults to {@link ActiveShardCount#NONE}, which will not wait for any shards + * to be active. Set this value to {@link ActiveShardCount#DEFAULT} to wait for the primary + * shard to be active. Set this value to {@link ActiveShardCount#ALL} to wait for all shards + * (primary and all replicas) to be active before returning. + * + * @param waitForActiveShards number of active shard copies to wait on + */ + public void waitForActiveShards(ActiveShardCount waitForActiveShards) { + if (waitForActiveShards.equals(ActiveShardCount.DEFAULT)) { + this.waitForActiveShards = ActiveShardCount.NONE; + } else { + this.waitForActiveShards = waitForActiveShards; + } + } + public ResumeFollowAction.Request getFollowRequest() { return followRequest; } @@ -176,6 +202,10 @@ public Request(StreamInput in) throws IOException { super(in); remoteCluster = in.readString(); leaderIndex = in.readString(); + // TODO: Update after backport + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + waitForActiveShards(ActiveShardCount.readFrom(in)); + } followRequest = new ResumeFollowAction.Request(in); } @@ -184,6 +214,10 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(remoteCluster); out.writeString(leaderIndex); + // TODO: Update after backport + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + 
waitForActiveShards.writeTo(out); + } followRequest.writeTo(out); } @@ -206,12 +240,23 @@ public boolean equals(Object o) { Request request = (Request) o; return Objects.equals(remoteCluster, request.remoteCluster) && Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(waitForActiveShards, request.waitForActiveShards) && Objects.equals(followRequest, request.followRequest); } @Override public int hashCode() { - return Objects.hash(remoteCluster, leaderIndex, followRequest); + return Objects.hash(remoteCluster, leaderIndex, waitForActiveShards, followRequest); + } + + @Override + public String toString() { + return "PutFollowAction.Request{" + + "remoteCluster='" + remoteCluster + '\'' + + ", leaderIndex='" + leaderIndex + '\'' + + ", waitForActiveShards=" + waitForActiveShards + + ", followRequest=" + followRequest + + '}'; } } @@ -280,6 +325,15 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); } + + @Override + public String toString() { + return "PutFollowAction.Response{" + + "followIndexCreated=" + followIndexCreated + + ", followIndexShardsAcked=" + followIndexShardsAcked + + ", indexFollowingStarted=" + indexFollowingStarted + + '}'; + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java index 9014c415f16bb..b1ec651500e0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetaIndex.java @@ -29,6 +29,7 @@ public static XContentBuilder docMapping() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); builder.startObject(TYPE); + ElasticsearchMappings.addMetaInformation(builder); ElasticsearchMappings.addDefaultMapping(builder); 
builder.startObject(ElasticsearchMappings.PROPERTIES) .startObject(Calendar.ID.getPreferredName()) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 43462c552da63..54c83e9a88a75 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -137,7 +137,7 @@ public MlMetadata(StreamInput in) throws IOException { } this.datafeeds = datafeeds; this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { this.upgradeMode = in.readBoolean(); } else { this.upgradeMode = false; @@ -148,7 +148,7 @@ public MlMetadata(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { writeMap(jobs, out); writeMap(datafeeds, out); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeBoolean(upgradeMode); } } @@ -201,7 +201,7 @@ public MlMetadataDiff(StreamInput in) throws IOException { MlMetadataDiff::readJobDiffFrom); this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DatafeedConfig::new, MlMetadataDiff::readDatafeedDiffFrom); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { upgradeMode = in.readBoolean(); } else { upgradeMode = false; @@ -224,7 +224,7 @@ public MetaData.Custom apply(MetaData.Custom part) { public void writeTo(StreamOutput out) throws IOException { jobs.writeTo(out); datafeeds.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeBoolean(upgradeMode); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java index 843be6596c380..437aa40c925f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/annotations/AnnotationIndex.java @@ -109,10 +109,11 @@ public static void createAnnotationsIndexIfNecessary(Settings settings, Client c } public static XContentBuilder annotationsMapping() throws IOException { - return jsonBuilder() + XContentBuilder builder = jsonBuilder() .startObject() - .startObject(ElasticsearchMappings.DOC_TYPE) - .startObject(ElasticsearchMappings.PROPERTIES) + .startObject(ElasticsearchMappings.DOC_TYPE); + ElasticsearchMappings.addMetaInformation(builder); + builder.startObject(ElasticsearchMappings.PROPERTIES) .startObject(Annotation.ANNOTATION.getPreferredName()) .field(ElasticsearchMappings.TYPE, ElasticsearchMappings.TEXT) .endObject() @@ -143,5 +144,6 @@ public static XContentBuilder annotationsMapping() throws IOException { .endObject() .endObject() .endObject(); + return builder; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index d979b897ad43a..08f73d791e53f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -29,8 +29,7 @@ public class JobTaskState implements PersistentTaskState { private static ParseField ALLOCATION_ID = new ParseField("allocation_id"); private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(NAME, - args -> new JobTaskState((JobState) args[0], (Long) 
args[1])); + new ConstructingObjectParser<>(NAME, true, args -> new JobTaskState((JobState) args[0], (Long) args[1])); static { PARSER.declareField(constructorArg(), p -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 0eb2e666916dc..d51a8f10e4a5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -960,10 +960,10 @@ private static void addModelSizeStatsFieldsToMapping(XContentBuilder builder) th } public static XContentBuilder auditMessageMapping() throws IOException { - return jsonBuilder() - .startObject() - .startObject(AuditMessage.TYPE.getPreferredName()) - .startObject(PROPERTIES) + XContentBuilder builder = jsonBuilder().startObject() + .startObject(AuditMessage.TYPE.getPreferredName()); + addMetaInformation(builder); + builder.startObject(PROPERTIES) .startObject(Job.ID.getPreferredName()) .field(TYPE, KEYWORD) .endObject() @@ -987,6 +987,7 @@ public static XContentBuilder auditMessageMapping() throws IOException { .endObject() .endObject() .endObject(); + return builder; } static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 4c2a479721a2a..27fa8b2cd9da0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -259,12 +259,8 @@ private boolean check(String action) { private boolean check(String action, String index) { assert index != null; - return check(action) && (indexNameMatcher.test(index) - && (allowRestrictedIndices - // all good if it is not restricted - || (false == RestrictedIndicesNames.NAMES_SET.contains(index)) - // allow monitor as a special case, even for restricted - || IndexPrivilege.MONITOR.predicate().test(action))); + return check(action) && indexNameMatcher.test(index) + && (allowRestrictedIndices || (false == RestrictedIndicesNames.NAMES_SET.contains(index))); } boolean hasQuery() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 8711a6c318e58..35e2043acd809 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -144,6 +144,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.hasEntry; @@ -563,9 +564,33 @@ public void testRemoteMonitoringCollectorRole() { assertThat(remoteMonitoringAgentRole.indices().allowedIndicesMatcher(IndexAction.NAME) .test(randomFrom(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME)), is(false)); + assertMonitoringOnRestrictedIndices(remoteMonitoringAgentRole); + assertNoAccessAllowed(remoteMonitoringAgentRole, RestrictedIndicesNames.NAMES_SET); } + private void assertMonitoringOnRestrictedIndices(Role role) { + final Settings indexSettings = 
Settings.builder().put("index.version.created", Version.CURRENT).build(); + final MetaData metaData = new MetaData.Builder() + .put(new IndexMetaData.Builder(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX) + .settings(indexSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .putAlias(new AliasMetaData.Builder(RestrictedIndicesNames.SECURITY_INDEX_NAME).build()) + .build(), true) + .build(); + final FieldPermissionsCache fieldPermissionsCache = new FieldPermissionsCache(Settings.EMPTY); + final List indexMonitoringActionNamesList = Arrays.asList(IndicesStatsAction.NAME, IndicesSegmentsAction.NAME, + GetSettingsAction.NAME, IndicesShardStoresAction.NAME, UpgradeStatusAction.NAME, RecoveryAction.NAME); + for (final String indexMonitoringActionName : indexMonitoringActionNamesList) { + final Map authzMap = role.indices().authorize(indexMonitoringActionName, + Sets.newHashSet(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX, RestrictedIndicesNames.SECURITY_INDEX_NAME), metaData, + fieldPermissionsCache); + assertThat(authzMap.get(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX).isGranted(), is(true)); + assertThat(authzMap.get(RestrictedIndicesNames.SECURITY_INDEX_NAME).isGranted(), is(true)); + } + } + public void testReportingUserRole() { final TransportRequest request = mock(TransportRequest.class); diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index 65baeb5f168c4..f8ffce9cd817a 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -61,6 +61,7 @@ public void testBasicCCRAndILMIntegration() throws Exception { // Policy with the same name must exist in follower cluster too: 
putILMPolicy(policyName, "50GB", null, TimeValue.timeValueHours(7*24)); followIndex(indexName, indexName); + ensureGreen(indexName); // Aliases are not copied from leader index, so we need to add that for the rollover action in follower cluster: client().performRequest(new Request("PUT", "/" + indexName + "/_alias/logs")); @@ -116,6 +117,7 @@ public void testCCRUnfollowDuringSnapshot() throws Exception { } else if ("follow".equals(targetCluster)) { createNewSingletonPolicy("unfollow-only", "hot", new UnfollowAction(), TimeValue.ZERO); followIndex(indexName, indexName); + ensureGreen(indexName); // Create the repository before taking the snapshot. Request request = new Request("PUT", "/_snapshot/repo"); @@ -210,7 +212,7 @@ public void testCcrAndIlmWithRollover() throws Exception { "\"mappings\": {\"_doc\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}}, " + "\"aliases\": {\"" + alias + "\": {\"is_write_index\": true}} }"); assertOK(leaderClient.performRequest(createIndexRequest)); - // Check that the new index is creeg + // Check that the new index is created Request checkIndexRequest = new Request("GET", "/_cluster/health/" + indexName); checkIndexRequest.addParameter("wait_for_status", "green"); checkIndexRequest.addParameter("timeout", "70s"); @@ -226,6 +228,7 @@ public void testCcrAndIlmWithRollover() throws Exception { index(leaderClient, indexName, "1"); assertDocumentExists(leaderClient, indexName, "1"); + ensureGreen(indexName); assertBusy(() -> { assertDocumentExists(client(), indexName, "1"); // Sanity check that following_index setting has been set, so that we can verify later that this setting has been unset: diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 1834b0b3c0616..6ec77a34c55b6 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -164,6 +164,13 @@ protected void masterOperation(SetUpgradeModeAction.Request request, ClusterStat wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state")); return; } + + // There are no tasks to worry about starting/stopping + if (tasksCustomMetaData == null || tasksCustomMetaData.tasks().isEmpty()) { + wrappedListener.onResponse(new AcknowledgedResponse(true)); + return; + } + // Did we change from disabled -> enabled? if (request.isEnabled()) { isolateDatafeeds(tasksCustomMetaData, isolateDatafeedListener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index 4223bff49825e..451f480b27830 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -7,7 +7,6 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import java.util.List; import java.util.Map; @@ -52,7 +51,7 @@ public ExtractionMethod getExtractionMethod() { public abstract Object[] value(SearchHit hit); public String getDocValueFormat() { - return DocValueFieldsContext.USE_DEFAULT_FORMAT; + return null; } public static ExtractedField newTimeField(String name, ExtractionMethod extractionMethod) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index d9ea6cb7c32e4..36e71de8bcba1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.query.WildcardQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate; @@ -198,7 +197,7 @@ public void onFailure(Exception e) { public void findDatafeedsForJobIds(Collection jobIds, ActionListener> listener) { SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedJobIdsQuery(jobIds)); sourceBuilder.fetchSource(false); - sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), null); SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) @@ -366,7 +365,7 @@ public void expandDatafeedIds(String expression, boolean allowNoDatafeeds, Actio SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens)); sourceBuilder.sort(DatafeedConfig.ID.getPreferredName()); sourceBuilder.fetchSource(false); - sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(DatafeedConfig.ID.getPreferredName(), null); SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) 
.setIndicesOptions(IndicesOptions.lenientExpandOpen()) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index a21e6bc8c133c..9019dc2032ccf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -52,7 +52,6 @@ import org.elasticsearch.index.query.WildcardQueryBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; @@ -424,7 +423,7 @@ public void jobIdMatches(List ids, ActionListener> listener SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(boolQueryBuilder); sourceBuilder.fetchSource(false); - sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(Job.ID.getPreferredName(), null); SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) @@ -509,8 +508,8 @@ public void expandJobsIds(String expression, boolean allowNoJobs, boolean exclud SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); sourceBuilder.sort(Job.ID.getPreferredName()); sourceBuilder.fetchSource(false); - sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); - sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + 
sourceBuilder.docValueField(Job.ID.getPreferredName(), null); + sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), null); SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) @@ -554,8 +553,8 @@ private SearchRequest makeExpandIdsSearchRequest(String expression, boolean excl SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting)); sourceBuilder.sort(Job.ID.getPreferredName()); sourceBuilder.fetchSource(false); - sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); - sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT); + sourceBuilder.docValueField(Job.ID.getPreferredName(), null); + sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), null); return client.prepareSearch(AnomalyDetectorsIndex.configIndexName()) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) @@ -638,7 +637,7 @@ public void expandGroupIds(List groupIds, ActionListener notExecuted = new ArrayList<>(); + queue.drainTo(notExecuted); + + for (Runnable runnable : notExecuted) { + if (runnable instanceof AbstractRunnable) { + ((AbstractRunnable) runnable).onRejection( + new EsRejectedExecutionException("unable to process as autodetect worker service has shutdown", true)); + } + } + } + } } catch (InterruptedException e) { Thread.currentThread().interrupt(); } finally { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java index ad999daafb254..098aa48a4f75c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ml.datafeed.extractor.fields; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; @@ -143,7 +142,7 @@ public void testAliasVersusName() { public void testGetDocValueFormat() { for (ExtractedField.ExtractionMethod method : ExtractedField.ExtractionMethod.values()) { - assertThat(ExtractedField.newField("f", method).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); + assertThat(ExtractedField.newField("f", method).getDocValueFormat(), equalTo(null)); } assertThat(ExtractedField.newTimeField("doc_value_time", ExtractedField.ExtractionMethod.DOC_VALUE).getDocValueFormat(), equalTo("epoch_millis")); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java index 222531141364a..db25f820dbbd8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldsTests.java @@ -7,7 +7,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -63,7 +62,7 @@ public void testBuildGivenMixtureOfTypes() { assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("time")); 
assertThat(extractedFields.getDocValueFields().get(0).getDocValueFormat(), equalTo("epoch_millis")); assertThat(extractedFields.getDocValueFields().get(1).getName(), equalTo("value")); - assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); + assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(null)); assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"airline"})); assertThat(extractedFields.getAllFields().size(), equalTo(4)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java index 20dd49029b3ea..6e7a3740e0a09 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; @@ -134,7 +133,7 @@ public void testBuildGivenMixtureOfTypes() { assertThat(extractedFields.getDocValueFields().get(0).getName(), equalTo("time")); assertThat(extractedFields.getDocValueFields().get(0).getDocValueFormat(), equalTo("epoch_millis")); assertThat(extractedFields.getDocValueFields().get(1).getName(), equalTo("value")); - assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(DocValueFieldsContext.USE_DEFAULT_FORMAT)); + 
assertThat(extractedFields.getDocValueFields().get(1).getDocValueFormat(), equalTo(null)); assertThat(extractedFields.getSourceFields().length, equalTo(1)); assertThat(extractedFields.getSourceFields()[0], equalTo("airline")); assertThat(extractedFields.getAllFields().size(), equalTo(4)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java index 4dfd1965804e5..26560f1034f9e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/config/JobTaskStateTests.java @@ -27,4 +27,9 @@ protected Writeable.Reader instanceReader() { protected JobTaskState doParseInstance(XContentParser parser) { return JobTaskState.fromXContent(parser); } + + @Override + protected boolean supportsUnknownFields() { + return true; + } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 82788d4500b09..6b4fd270b1bb7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -54,6 +55,7 @@ import 
org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange; import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory; import org.elasticsearch.xpack.ml.notifications.Auditor; +import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; @@ -72,9 +74,11 @@ import java.util.TreeMap; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; @@ -125,6 +129,8 @@ public class AutodetectProcessManagerTests extends ESTestCase { private Quantiles quantiles = new Quantiles("foo", new Date(), "state"); private Set filters = new HashSet<>(); + private ThreadPool threadPool; + @Before public void setup() throws Exception { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build(); @@ -159,8 +165,16 @@ public void setup() throws Exception { handler.accept(buildAutodetectParams()); return null; }).when(jobResultsProvider).getAutodetectParams(any(), any(), any()); + + threadPool = new TestThreadPool("AutodetectProcessManagerTests"); } + @After + public void stopThreadPool() throws InterruptedException { + terminate(threadPool); + } + + public void testMaxOpenJobsSetting_givenDefault() { int maxOpenJobs = AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.get(Settings.EMPTY); assertEquals(20, maxOpenJobs); @@ -690,6 +704,62 @@ public void testAutodetectWorkerExecutorServiceDoesNotSwallowErrors() { } } + public void testAutodetectWorkerExecutorService_SubmitAfterShutdown() { + AutodetectProcessManager.AutodetectWorkerExecutorService executor = + new AutodetectWorkerExecutorService(new 
ThreadContext(Settings.EMPTY)); + + threadPool.generic().execute(() -> executor.start()); + executor.shutdown(); + expectThrows(EsRejectedExecutionException.class, () -> executor.execute(() -> {})); + } + + public void testAutodetectWorkerExecutorService_TasksNotExecutedCallHandlerOnShutdown() + throws InterruptedException, ExecutionException { + AutodetectProcessManager.AutodetectWorkerExecutorService executor = + new AutodetectWorkerExecutorService(new ThreadContext(Settings.EMPTY)); + + CountDownLatch latch = new CountDownLatch(1); + + Future executorFinished = threadPool.generic().submit(() -> executor.start()); + + // run a task that will block while the others are queued up + executor.execute(() -> { + try { + latch.await(); + } catch (InterruptedException e) { + } + }); + + AtomicBoolean runnableShouldNotBeCalled = new AtomicBoolean(false); + executor.execute(() -> runnableShouldNotBeCalled.set(true)); + + AtomicInteger onFailureCallCount = new AtomicInteger(); + AtomicInteger doRunCallCount = new AtomicInteger(); + for (int i=0; i<2; i++) { + executor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + onFailureCallCount.incrementAndGet(); + } + + @Override + protected void doRun() { + doRunCallCount.incrementAndGet(); + } + }); + } + + // now shutdown + executor.shutdown(); + latch.countDown(); + executorFinished.get(); + + assertFalse(runnableShouldNotBeCalled.get()); + // the AbstractRunnables should have had their callbacks called + assertEquals(2, onFailureCallCount.get()); + assertEquals(0, doRunCallCount.get()); + } + private AutodetectProcessManager createNonSpyManager(String jobId) { Client client = mock(Client.class); ThreadPool threadPool = mock(ThreadPool.class); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index 
809e4a6d30524..4b30224dcd481 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -38,7 +38,6 @@ import org.elasticsearch.xpack.core.ssl.PemUtils; import javax.security.auth.x500.X500Principal; - import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; @@ -154,7 +153,7 @@ private static class InputFileParser { } public static void main(String[] args) throws Exception { - new CertificateGenerateTool().main(args, Terminal.DEFAULT); + exit(new CertificateGenerateTool().main(args, Terminal.DEFAULT)); } @Override diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index a966cac9109d9..435305b8a6914 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -134,7 +134,7 @@ private static class CertificateToolParser { public static void main(String[] args) throws Exception { - new CertificateTool().main(args, Terminal.DEFAULT); + exit(new CertificateTool().main(args, Terminal.DEFAULT)); } CertificateTool() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index 6fa59269ac7e6..a60b2204095a5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -90,7 +90,7 @@ public class SamlMetadataCommand 
extends EnvironmentAwareCommand { private KeyStoreWrapper keyStoreWrapper; public static void main(String[] args) throws Exception { - new SamlMetadataCommand().main(args, Terminal.DEFAULT); + exit(new SamlMetadataCommand().main(args, Terminal.DEFAULT)); } public SamlMetadataCommand() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 675300e25760e..da03e9ffe3d1e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -7,14 +7,27 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.upgrade.get.UpgradeStatusResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import 
org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.junit.After; +import org.junit.Before; import java.util.Collections; @@ -25,11 +38,29 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.containsInAnyOrder; public class MultipleIndicesPermissionsTests extends SecurityIntegTestCase { protected static final SecureString USERS_PASSWD = new SecureString("passwd".toCharArray()); + @Before + public void waitForSecurityIndexWritable() throws Exception { + // adds a dummy user to the native realm to force .security index creation + securityClient().preparePutUser("dummy_user", "password".toCharArray(), Hasher.BCRYPT, "missing_role").get(); + assertSecurityIndexActive(); + } + + @After + public void cleanupSecurityIndex() throws Exception { + super.deleteSecurityIndex(); + } + + @Override + protected boolean addMockHttpTransport() { + return false; // enable http + } + @Override protected String configRoles() { return SecuritySettingsSource.TEST_ROLE + ":\n" + @@ -49,6 +80,12 @@ protected String configRoles() { " - names: 'a'\n" + " privileges: [all]\n" + "\n" + + "role_monitor_all_unrestricted_indices:\n" + + " cluster: [monitor]\n" + + " indices:\n" + + " - names: '*'\n" + + " privileges: [monitor]\n" + + "\n" + "role_b:\n" + " indices:\n" + " - names: 'b'\n" + @@ -60,14 +97,16 @@ protected String configUsers() { final String usersPasswdHashed = new String(getFastStoredHashAlgoForTests().hash(USERS_PASSWD)); return SecuritySettingsSource.CONFIG_STANDARD_USER + "user_a:" + usersPasswdHashed + "\n" + - "user_ab:" + usersPasswdHashed + "\n"; + "user_ab:" + usersPasswdHashed + "\n" + + "user_monitor:" + usersPasswdHashed + "\n"; } @Override protected String configUsersRoles() { return SecuritySettingsSource.CONFIG_STANDARD_USER_ROLES + 
"role_a:user_a,user_ab\n" + - "role_b:user_ab\n"; + "role_b:user_ab\n" + + "role_monitor_all_unrestricted_indices:user_monitor\n"; } public void testSingleRole() throws Exception { @@ -127,6 +166,71 @@ public void testSingleRole() throws Exception { assertHitCount(searchResponse, 1); } + public void testMonitorRestrictedWildcards() throws Exception { + + IndexResponse indexResponse = index("foo", "type", jsonBuilder() + .startObject() + .field("name", "value") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + indexResponse = index("foobar", "type", jsonBuilder() + .startObject() + .field("name", "value") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + indexResponse = index("foobarfoo", "type", jsonBuilder() + .startObject() + .field("name", "value") + .endObject()); + assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); + + refresh(); + + final Client client = client() + .filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user_monitor", USERS_PASSWD))); + + final GetSettingsResponse getSettingsResponse = client.admin().indices().prepareGetSettings(randomFrom("*", "_all", "foo*")).get(); + assertThat(getSettingsResponse.getIndexToSettings().size(), is(3)); + assertThat(getSettingsResponse.getIndexToSettings().containsKey("foo"), is(true)); + assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobar"), is(true)); + assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobarfoo"), is(true)); + + final IndicesShardStoresResponse indicesShardsStoresResponse = client.admin().indices() + .prepareShardStores(randomFrom("*", "_all", "foo*")).setShardStatuses("all").get(); + assertThat(indicesShardsStoresResponse.getStoreStatuses().size(), is(3)); + assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foo"), is(true)); + 
assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foobar"), is(true)); + assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foobarfoo"), is(true)); + + final UpgradeStatusResponse upgradeStatusResponse = client.admin().indices().prepareUpgradeStatus(randomFrom("*", "_all", "foo*")) + .get(); + assertThat(upgradeStatusResponse.getIndices().size(), is(3)); + assertThat(upgradeStatusResponse.getIndices().keySet(), containsInAnyOrder("foo", "foobar", "foobarfoo")); + + final IndicesStatsResponse indicesStatsResponse = client.admin().indices().prepareStats(randomFrom("*", "_all", "foo*")).get(); + assertThat(indicesStatsResponse.getIndices().size(), is(3)); + assertThat(indicesStatsResponse.getIndices().keySet(), containsInAnyOrder("foo", "foobar", "foobarfoo")); + + final IndicesSegmentResponse indicesSegmentResponse = client.admin().indices().prepareSegments("*").get(); + assertThat(indicesSegmentResponse.getIndices().size(), is(3)); + assertThat(indicesSegmentResponse.getIndices().keySet(), containsInAnyOrder("foo", "foobar", "foobarfoo")); + + final RecoveryResponse indicesRecoveryResponse = client.admin().indices().prepareRecoveries("*").get(); + assertThat(indicesRecoveryResponse.shardRecoveryStates().size(), is(3)); + assertThat(indicesRecoveryResponse.shardRecoveryStates().keySet(), containsInAnyOrder("foo", "foobar", "foobarfoo")); + + // test _cat/indices with wildcards that cover unauthorized indices (".security" in this case) + RequestOptions.Builder optionsBuilder = RequestOptions.DEFAULT.toBuilder(); + optionsBuilder.addHeader("Authorization", UsernamePasswordToken.basicAuthHeaderValue("user_monitor", USERS_PASSWD)); + RequestOptions options = optionsBuilder.build(); + Request catIndicesRequest = new Request("GET", "/_cat/indices/" + randomFrom("*", "_all", "foo*")); + catIndicesRequest.setOptions(options); + Response catIndicesResponse = getRestClient().performRequest(catIndicesRequest); + 
assertThat(catIndicesResponse.getStatusLine().getStatusCode() < 300, is(true)); + } + public void testMultipleRoles() throws Exception { IndexResponse indexResponse = index("a", "type", jsonBuilder() .startObject() diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java index adeb4a7f86569..d72130db2f571 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java @@ -78,6 +78,7 @@ public class SecuritySettingsSource extends NodeConfigurationSource { " cluster: [ ALL ]\n" + " indices:\n" + " - names: '*'\n" + + " allow_restricted_indices: true\n" + " privileges: [ ALL ]\n"; private final Path parentFolder; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index dccc8f3ce587a..171e11614c5f3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -73,6 +73,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -151,7 +152,8 @@ import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionRunAs; import static 
org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; -import static org.hamcrest.Matchers.arrayContaining; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_SECURITY_INDEX; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -804,7 +806,7 @@ public void testRunAsRequestWithValidPermissions() { verifyNoMoreInteractions(auditTrail); } - public void testNonXPackUserCannotExecuteOperationAgainstSecurityIndex() { + public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndices() { RoleDescriptor role = new RoleDescriptor("all access", new String[]{"all"}, new IndicesPrivileges[]{IndicesPrivileges.builder().indices("*").privileges("all").build()}, null); final Authentication authentication = createAuthentication(new User("all_access_user", "all_access")); @@ -812,29 +814,41 @@ public void testNonXPackUserCannotExecuteOperationAgainstSecurityIndex() { ClusterState state = mock(ClusterState.class); when(clusterService.state()).thenReturn(state); when(state.metaData()).thenReturn(MetaData.builder() - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) - .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(INTERNAL_SECURITY_INDEX) + .putAlias(new AliasMetaData.Builder(SECURITY_INDEX_NAME).build()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(),true) .build()); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); List> requests = new ArrayList<>(); - requests.add(new Tuple<>(BulkAction.NAME + "[s]", - new DeleteRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(UpdateAction.NAME, - 
new UpdateRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(BulkAction.NAME + "[s]", - new IndexRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(SECURITY_INDEX_NAME))); - requests.add(new Tuple<>(TermVectorsAction.NAME, - new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(GetAction.NAME, new GetRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add( + new Tuple<>(BulkAction.NAME + "[s]", new DeleteRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "id"))); + requests.add(new Tuple<>(UpdateAction.NAME, new UpdateRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "id"))); + requests.add(new Tuple<>(BulkAction.NAME + "[s]", new IndexRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); requests.add(new Tuple<>(TermVectorsAction.NAME, - new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); + new TermVectorsRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "type", "id"))); + requests.add(new Tuple<>(GetAction.NAME, new GetRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "id"))); requests.add(new Tuple<>(IndicesAliasesAction.NAME, new IndicesAliasesRequest() - .addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)))); + .addAliasAction(AliasActions.add().alias("security_alias").index(INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(UpdateSettingsAction.NAME, + new UpdateSettingsRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + // cannot execute monitor operations + requests.add(new Tuple<>(IndicesStatsAction.NAME, + new IndicesStatsRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); requests.add( - new Tuple<>(UpdateSettingsAction.NAME, new 
UpdateSettingsRequest().indices(SECURITY_INDEX_NAME))); + new Tuple<>(RecoveryAction.NAME, new RecoveryRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(IndicesSegmentsAction.NAME, + new IndicesSegmentsRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(GetSettingsAction.NAME, + new GetSettingsRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(IndicesShardStoresAction.NAME, + new IndicesShardStoresRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); + requests.add(new Tuple<>(UpgradeStatusAction.NAME, + new UpgradeStatusRequest().indices(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); for (Tuple requestTuple : requests) { String action = requestTuple.v1(); @@ -847,12 +861,12 @@ public void testNonXPackUserCannotExecuteOperationAgainstSecurityIndex() { } // we should allow waiting for the health of the index or any index if the user has this permission - ClusterHealthRequest request = new ClusterHealthRequest(SECURITY_INDEX_NAME); + ClusterHealthRequest request = new ClusterHealthRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); authorize(authentication, ClusterHealthAction.NAME, request); verify(auditTrail).accessGranted(requestId, authentication, ClusterHealthAction.NAME, request, new String[]{role.getName()}); // multiple indices - request = new ClusterHealthRequest(SECURITY_INDEX_NAME, "foo", "bar"); + request = new ClusterHealthRequest(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX, "foo", "bar"); authorize(authentication, ClusterHealthAction.NAME, request); verify(auditTrail).accessGranted(requestId, authentication, ClusterHealthAction.NAME, request, new String[]{role.getName()}); verifyNoMoreInteractions(auditTrail); @@ -863,17 +877,24 @@ public void testNonXPackUserCannotExecuteOperationAgainstSecurityIndex() { 
assertEquals(IndicesAndAliasesResolver.NO_INDICES_OR_ALIASES_LIST, Arrays.asList(searchRequest.indices())); } - public void testGrantedNonXPackUserCanExecuteMonitoringOperationsAgainstSecurityIndex() { - RoleDescriptor role = new RoleDescriptor("all access", new String[]{"all"}, - new IndicesPrivileges[]{IndicesPrivileges.builder().indices("*").privileges("all").build()}, null); - final Authentication authentication = createAuthentication(new User("all_access_user", "all_access")); - roleMap.put("all_access", role); + public void testMonitoringOperationsAgainstSecurityIndexRequireAllowRestricted() { + final RoleDescriptor restrictedMonitorRole = new RoleDescriptor("restricted_monitor", null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("*").privileges("monitor").build() }, null); + final RoleDescriptor unrestrictedMonitorRole = new RoleDescriptor("unrestricted_monitor", null, new IndicesPrivileges[] { + IndicesPrivileges.builder().indices("*").privileges("monitor").allowRestrictedIndices(true).build() }, null); + roleMap.put("restricted_monitor", restrictedMonitorRole); + roleMap.put("unrestricted_monitor", unrestrictedMonitorRole); + final Authentication restrictedUserAuthn = createAuthentication(new User("restricted_user", "restricted_monitor")); + final Authentication unrestrictedUserAuthn = createAuthentication(new User("unrestricted_user", "unrestricted_monitor")); ClusterState state = mock(ClusterState.class); when(clusterService.state()).thenReturn(state); when(state.metaData()).thenReturn(MetaData.builder() - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) - .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(INTERNAL_SECURITY_INDEX) + .putAlias(new AliasMetaData.Builder(SECURITY_INDEX_NAME).build()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1) + 
.numberOfReplicas(0) + .build(), true) .build()); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); @@ -882,16 +903,18 @@ public void testGrantedNonXPackUserCanExecuteMonitoringOperationsAgainstSecurity requests.add(new Tuple<>(RecoveryAction.NAME, new RecoveryRequest().indices(SECURITY_INDEX_NAME))); requests.add(new Tuple<>(IndicesSegmentsAction.NAME, new IndicesSegmentsRequest().indices(SECURITY_INDEX_NAME))); requests.add(new Tuple<>(GetSettingsAction.NAME, new GetSettingsRequest().indices(SECURITY_INDEX_NAME))); - requests.add(new Tuple<>(IndicesShardStoresAction.NAME, - new IndicesShardStoresRequest().indices(SECURITY_INDEX_NAME))); - requests.add(new Tuple<>(UpgradeStatusAction.NAME, - new UpgradeStatusRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(IndicesShardStoresAction.NAME, new IndicesShardStoresRequest().indices(SECURITY_INDEX_NAME))); + requests.add(new Tuple<>(UpgradeStatusAction.NAME, new UpgradeStatusRequest().indices(SECURITY_INDEX_NAME))); for (final Tuple requestTuple : requests) { final String action = requestTuple.v1(); final TransportRequest request = requestTuple.v2(); - authorize(authentication, action, request); - verify(auditTrail).accessGranted(requestId, authentication, action, request, new String[]{role.getName()}); + assertThrowsAuthorizationException(() -> authorize(restrictedUserAuthn, action, request), action, "restricted_user"); + verify(auditTrail).accessDenied(requestId, restrictedUserAuthn, action, request, new String[] { "restricted_monitor" }); + verifyNoMoreInteractions(auditTrail); + authorize(unrestrictedUserAuthn, action, request); + verify(auditTrail).accessGranted(requestId, unrestrictedUserAuthn, action, request, new String[] { "unrestricted_monitor" }); + verifyNoMoreInteractions(auditTrail); } } @@ -901,34 +924,35 @@ public void testSuperusersCanExecuteOperationAgainstSecurityIndex() { ClusterState state = mock(ClusterState.class); 
when(clusterService.state()).thenReturn(state); when(state.metaData()).thenReturn(MetaData.builder() - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) - .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(INTERNAL_SECURITY_INDEX) + .putAlias(new AliasMetaData.Builder(SECURITY_INDEX_NAME).build()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(), true) .build()); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); List> requests = new ArrayList<>(); - requests.add(new Tuple<>(DeleteAction.NAME, - new DeleteRequest(SECURITY_INDEX_NAME, "type", "id"))); + requests.add(new Tuple<>(DeleteAction.NAME, new DeleteRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "id"))); requests.add(new Tuple<>(BulkAction.NAME + "[s]", - createBulkShardRequest(SECURITY_INDEX_NAME, DeleteRequest::new))); - requests.add(new Tuple<>(UpdateAction.NAME, - new UpdateRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(IndexAction.NAME, - new IndexRequest(SECURITY_INDEX_NAME, "type", "id"))); + createBulkShardRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), DeleteRequest::new))); + requests.add(new Tuple<>(UpdateAction.NAME, new UpdateRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "id"))); + requests.add(new Tuple<>(IndexAction.NAME, new IndexRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); requests.add(new Tuple<>(BulkAction.NAME + "[s]", - createBulkShardRequest(SECURITY_INDEX_NAME, IndexRequest::new))); - requests.add(new Tuple<>(SearchAction.NAME, new SearchRequest(SECURITY_INDEX_NAME))); + createBulkShardRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), IndexRequest::new))); + requests.add(new Tuple<>(SearchAction.NAME, new 
SearchRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); requests.add(new Tuple<>(TermVectorsAction.NAME, - new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(GetAction.NAME, new GetRequest(SECURITY_INDEX_NAME, "type", "id"))); + new TermVectorsRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "type", "id"))); + requests.add(new Tuple<>(GetAction.NAME, new GetRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "type", "id"))); requests.add(new Tuple<>(TermVectorsAction.NAME, - new TermVectorsRequest(SECURITY_INDEX_NAME, "type", "id"))); - requests.add(new Tuple<>(IndicesAliasesAction.NAME, new IndicesAliasesRequest() - .addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)))); - requests.add(new Tuple<>(ClusterHealthAction.NAME, new ClusterHealthRequest(SECURITY_INDEX_NAME))); + new TermVectorsRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "type", "id"))); + requests.add(new Tuple<>(IndicesAliasesAction.NAME, + new IndicesAliasesRequest().addAliasAction(AliasActions.add().alias("security_alias").index(INTERNAL_SECURITY_INDEX)))); + requests.add( + new Tuple<>(ClusterHealthAction.NAME, new ClusterHealthRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)))); requests.add(new Tuple<>(ClusterHealthAction.NAME, - new ClusterHealthRequest(SECURITY_INDEX_NAME, "foo", "bar"))); + new ClusterHealthRequest(randomFrom(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX), "foo", "bar"))); for (final Tuple requestTuple : requests) { final String action = requestTuple.v1(); @@ -946,9 +970,12 @@ public void testSuperusersCanExecuteOperationAgainstSecurityIndexWithWildcard() ClusterState state = mock(ClusterState.class); when(clusterService.state()).thenReturn(state); when(state.metaData()).thenReturn(MetaData.builder() - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME) - .settings(Settings.builder().put("index.version.created", 
Version.CURRENT).build()) - .numberOfShards(1).numberOfReplicas(0).build(), true) + .put(new IndexMetaData.Builder(INTERNAL_SECURITY_INDEX) + .putAlias(new AliasMetaData.Builder(SECURITY_INDEX_NAME).build()) + .settings(Settings.builder().put("index.version.created", Version.CURRENT).build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(), true) .build()); final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); @@ -956,7 +983,7 @@ public void testSuperusersCanExecuteOperationAgainstSecurityIndexWithWildcard() SearchRequest request = new SearchRequest("_all"); authorize(createAuthentication(superuser), action, request); verify(auditTrail).accessGranted(requestId, authentication, action, request, superuser.roles()); - assertThat(request.indices(), arrayContaining(".security")); + assertThat(request.indices(), arrayContainingInAnyOrder(INTERNAL_SECURITY_INDEX, SECURITY_INDEX_NAME)); } public void testAnonymousRolesAreAppliedToOtherUsers() { diff --git a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java index 58c5ae5c78e91..b7d0649943116 100644 --- a/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java +++ b/x-pack/plugin/sql/qa/single-node/src/test/java/org/elasticsearch/xpack/sql/qa/single_node/CliExplainIT.java @@ -103,7 +103,6 @@ public void testExplainWithWhere() throws IOException { assertThat(readLine(), startsWith(" \"docvalue_fields\" : [")); assertThat(readLine(), startsWith(" {")); assertThat(readLine(), startsWith(" \"field\" : \"i\"")); - assertThat(readLine(), startsWith(" \"format\" : \"use_field_mapping\"")); assertThat(readLine(), startsWith(" }")); assertThat(readLine(), startsWith(" ],")); assertThat(readLine(), startsWith(" \"sort\" : [")); diff --git 
a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java index 647bb90952073..76a04d03435e3 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateResponseTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.action; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.AbstractStreamableTestCase; import org.elasticsearch.xpack.sql.action.SqlTranslateResponse; @@ -20,7 +19,7 @@ protected SqlTranslateResponse createTestInstance() { if (randomBoolean()) { long docValues = iterations(5, 10); for (int i = 0; i < docValues; i++) { - s.docValueField(randomAlphaOfLength(10), DocValueFieldsContext.USE_DEFAULT_FORMAT); + s.docValueField(randomAlphaOfLength(10)); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java index 4c4bee9440ba1..1872748c328fa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SqlSourceBuilder.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.script.Script; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext.FieldAndFormat; import java.util.LinkedHashMap; @@ -69,8 +68,7 @@ public void build(SearchSourceBuilder sourceBuilder) { if 
(!sourceFields.isEmpty()) { sourceBuilder.fetchSource(sourceFields.toArray(Strings.EMPTY_ARRAY), null); } - docFields.forEach(field -> sourceBuilder.docValueField(field.field, - field.format == null ? DocValueFieldsContext.USE_DEFAULT_FORMAT : field.format)); + docFields.forEach(field -> sourceBuilder.docValueField(field.field, field.format)); scriptFields.forEach(sourceBuilder::scriptField); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java index 295932cd99c5e..76c7bda320012 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/Percentile.java @@ -43,7 +43,7 @@ public Percentile replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { if (!percent.foldable()) { - return new TypeResolution(format(null, "2nd argument of PERCENTILE must be a constant, received [{}]", + return new TypeResolution(format(null, "Second argument of PERCENTILE must be a constant, received [{}]", Expressions.name(percent))); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java index 92bc794b248da..b30b38a01b6c5 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/aggregate/PercentileRank.java @@ -43,7 +43,7 @@ public Expression replaceChildren(List newChildren) { @Override protected TypeResolution resolveType() { if (!value.foldable()) { - return new TypeResolution(format(null, "2nd argument 
of PERCENTILE_RANK must be a constant, received [{}]", + return new TypeResolution(format(null, "Second argument of PERCENTILE_RANK must be a constant, received [{}]", Expressions.name(value))); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 76b58babe2832..b3730ee33405e 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -15,8 +15,8 @@ import org.elasticsearch.xpack.sql.session.Rows; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; +import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; import org.elasticsearch.xpack.sql.type.EsField; @@ -133,42 +133,46 @@ static void fillInRows(String clusterName, String indexName, Map nestedHitFieldRef(FieldAttribute List nestedRefs = new ArrayList<>(); String name = aliasName(attr); - String format = attr.field().getDataType() == DataType.DATETIME ? "epoch_millis" : DocValueFieldsContext.USE_DEFAULT_FORMAT; + String format = attr.field().getDataType() == DataType.DATETIME ? 
"epoch_millis" : null; Query q = rewriteToContainNestedField(query, attr.source(), attr.nestedParent().name(), name, format, attr.field().isAggregatable()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index 50dda656ab4d1..9e56f46949ff6 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -154,7 +154,7 @@ public void testSqlTranslateActionLicense() throws Exception { .query("SELECT * FROM test").get(); SearchSourceBuilder source = response.source(); assertThat(source.docValueFields(), Matchers.contains( - new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT))); + new DocValueFieldsContext.FieldAndFormat("count", null))); FetchSourceContext fetchSource = source.fetchSource(); assertThat(fetchSource.includes(), Matchers.arrayContaining("data")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java index 3dc41ad9dd362..0528b9121d323 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlTranslateActionIT.java @@ -35,7 +35,7 @@ public void testSqlTranslateAction() { assertTrue(fetch.fetchSource()); assertArrayEquals(new String[] { "data" }, fetch.includes()); assertEquals( - singletonList(new DocValueFieldsContext.FieldAndFormat("count", DocValueFieldsContext.USE_DEFAULT_FORMAT)), + singletonList(new DocValueFieldsContext.FieldAndFormat("count", null)), source.docValueFields()); assertEquals(singletonList(SortBuilders.fieldSort("count").missing("_last").unmappedType("long")), 
source.sorts()); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 3316b179f50cb..4279910e0e03b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -538,12 +538,12 @@ public void testAggsInHistogram() { } public void testErrorMessageForPercentileWithSecondArgBasedOnAField() { - assertEquals("1:8: 2nd argument of PERCENTILE must be a constant, received [ABS(int)]", + assertEquals("1:8: Second argument of PERCENTILE must be a constant, received [ABS(int)]", error("SELECT PERCENTILE(int, ABS(int)) FROM test")); } public void testErrorMessageForPercentileRankWithSecondArgBasedOnAField() { - assertEquals("1:8: 2nd argument of PERCENTILE_RANK must be a constant, received [ABS(int)]", + assertEquals("1:8: Second argument of PERCENTILE_RANK must be a constant, received [ABS(int)]", error("SELECT PERCENTILE_RANK(int, ABS(int)) FROM test")); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index 6e254eca93803..be656411656f4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -61,7 +61,7 @@ public void testSysColumns() { public void testSysColumnsInOdbcMode() { List> rows = new ArrayList<>(); SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null, true); - assertEquals(16, 
rows.size()); + assertEquals(14, rows.size()); assertEquals(24, rows.get(0).size()); List row = rows.get(0); @@ -90,6 +90,16 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + row = rows.get(3); + assertEquals("keyword", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + row = rows.get(4); assertEquals("date", name(row)); assertEquals((short) Types.TIMESTAMP, sqlType(row)); @@ -101,17 +111,58 @@ public void testSysColumnsInOdbcMode() { assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + row = rows.get(5); + assertEquals("unsupported", name(row)); + assertEquals((short) Types.OTHER, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(0, precision(row)); + assertEquals(0, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(6); + assertEquals("some.dotted.field", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + row = rows.get(7); - assertEquals("some.dotted", name(row)); - assertEquals((short) Types.STRUCT, sqlType(row)); + assertEquals("some.string", name(row)); + 
assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); - assertEquals(-1, bufferLength(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); assertNull(decimalPrecision(row)); assertEquals(Short.class, nullable(row).getClass()); assertEquals(Short.class, sqlDataType(row).getClass()); assertEquals(Short.class, sqlDataTypeSub(row).getClass()); - row = rows.get(15); + row = rows.get(8); + assertEquals("some.string.normalized", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(9); + assertEquals("some.string.typical", name(row)); + assertEquals((short) Types.VARCHAR, sqlType(row)); + assertEquals(null, radix(row)); + assertEquals(Integer.MAX_VALUE, bufferLength(row)); + assertNull(decimalPrecision(row)); + assertEquals(Short.class, nullable(row).getClass()); + assertEquals(Short.class, sqlDataType(row).getClass()); + assertEquals(Short.class, sqlDataTypeSub(row).getClass()); + + row = rows.get(13); assertEquals("some.ambiguous.normalized", name(row)); assertEquals((short) Types.VARCHAR, sqlType(row)); assertEquals(null, radix(row)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java index 5e675b0be5a72..424964bdbd9f3 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/container/QueryContainerTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.querydsl.container; -import 
org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.querydsl.query.BoolQuery; import org.elasticsearch.xpack.sql.querydsl.query.MatchAll; @@ -24,7 +23,7 @@ public class QueryContainerTests extends ESTestCase { private Source source = SourceTests.randomSource(); private String path = randomAlphaOfLength(5); private String name = randomAlphaOfLength(5); - private String format = DocValueFieldsContext.USE_DEFAULT_FORMAT; + private String format = null; private boolean hasDocValues = randomBoolean(); public void testRewriteToContainNestedFieldNoQuery() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java index adc733a29c33b..ac6b85538805c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/BoolQueryTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.querydsl.query; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.tree.Source; @@ -53,15 +52,14 @@ public void testContainsNestedField() { public void testAddNestedField() { Query q = boolQueryWithoutNestedChildren(); - assertSame(q, q.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), DocValueFieldsContext.USE_DEFAULT_FORMAT, - randomBoolean())); + assertSame(q, q.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), null, randomBoolean())); String path = randomAlphaOfLength(5); String field = randomAlphaOfLength(5); q = boolQueryWithNestedChildren(path, field); String newField = randomAlphaOfLength(5); boolean hasDocValues = randomBoolean(); - Query rewritten = 
q.addNestedField(path, newField, DocValueFieldsContext.USE_DEFAULT_FORMAT, hasDocValues); + Query rewritten = q.addNestedField(path, newField, null, hasDocValues); assertNotSame(q, rewritten); assertTrue(rewritten.containsNestedField(path, newField)); } @@ -87,7 +85,7 @@ private Query boolQueryWithoutNestedChildren() { private Query boolQueryWithNestedChildren(String path, String field) { NestedQuery match = new NestedQuery(SourceTests.randomSource(), path, - singletonMap(field, new SimpleImmutableEntry<>(randomBoolean(), DocValueFieldsContext.USE_DEFAULT_FORMAT)), + singletonMap(field, new SimpleImmutableEntry<>(randomBoolean(), null)), new MatchAll(SourceTests.randomSource())); Query matchAll = new MatchAll(SourceTests.randomSource()); Query left; @@ -108,4 +106,4 @@ public void testToString() { new ExistsQuery(new Source(1, 1, StringUtils.EMPTY), "f1"), new ExistsQuery(new Source(1, 7, StringUtils.EMPTY), "f2")).toString()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java index 05ef987480910..29ede49d98248 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/LeafQueryTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.tree.Source; @@ -54,8 +53,7 @@ public void testContainsNestedField() { public void testAddNestedField() { Query query = new DummyLeafQuery(SourceTests.randomSource()); // Leaf queries don't contain nested fields. 
- assertSame(query, query.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), DocValueFieldsContext.USE_DEFAULT_FORMAT, - randomBoolean())); + assertSame(query, query.addNestedField(randomAlphaOfLength(5), randomAlphaOfLength(5), null, randomBoolean())); } public void testEnrichNestedSort() { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java index 5fe69760a694d..818ba04fa1882 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/querydsl/query/NestedQueryTests.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.querydsl.query; -import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.sort.NestedSortBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; @@ -45,7 +44,7 @@ private static Map> randomFields() { int size = between(0, 5); Map> fields = new HashMap<>(size); while (fields.size() < size) { - fields.put(randomAlphaOfLength(5), new SimpleImmutableEntry<>(randomBoolean(), DocValueFieldsContext.USE_DEFAULT_FORMAT)); + fields.put(randomAlphaOfLength(5), new SimpleImmutableEntry<>(randomBoolean(), null)); } return fields; } @@ -80,18 +79,18 @@ public void testAddNestedField() { NestedQuery q = randomNestedQuery(0); for (String field : q.fields().keySet()) { // add does nothing if the field is already there - assertSame(q, q.addNestedField(q.path(), field, DocValueFieldsContext.USE_DEFAULT_FORMAT, randomBoolean())); + assertSame(q, q.addNestedField(q.path(), field, null, randomBoolean())); String otherPath = randomValueOtherThan(q.path(), () -> randomAlphaOfLength(5)); // add does nothing if the path doesn't match - assertSame(q, q.addNestedField(otherPath, 
randomAlphaOfLength(5), DocValueFieldsContext.USE_DEFAULT_FORMAT, randomBoolean())); + assertSame(q, q.addNestedField(otherPath, randomAlphaOfLength(5), null, randomBoolean())); } // if the field isn't in the list then add rewrites to a query with all the old fields and the new one String newField = randomValueOtherThanMany(q.fields()::containsKey, () -> randomAlphaOfLength(5)); boolean hasDocValues = randomBoolean(); - NestedQuery added = (NestedQuery) q.addNestedField(q.path(), newField, DocValueFieldsContext.USE_DEFAULT_FORMAT, hasDocValues); + NestedQuery added = (NestedQuery) q.addNestedField(q.path(), newField, null, hasDocValues); assertNotSame(q, added); - assertThat(added.fields(), hasEntry(newField, new SimpleImmutableEntry<>(hasDocValues, DocValueFieldsContext.USE_DEFAULT_FORMAT))); + assertThat(added.fields(), hasEntry(newField, new SimpleImmutableEntry<>(hasDocValues, null))); assertTrue(added.containsNestedField(q.path(), newField)); for (Map.Entry> field : q.fields().entrySet()) { assertThat(added.fields(), hasEntry(field.getKey(), field.getValue())); @@ -133,8 +132,8 @@ public void testEnrichNestedSort() { public void testToString() { NestedQuery q = new NestedQuery(new Source(1, 1, StringUtils.EMPTY), "a.b", - singletonMap("f", new SimpleImmutableEntry<>(true, DocValueFieldsContext.USE_DEFAULT_FORMAT)), + singletonMap("f", new SimpleImmutableEntry<>(true, null)), new MatchAll(new Source(1, 1, StringUtils.EMPTY))); - assertEquals("NestedQuery@1:2[a.b.{f=true=use_field_mapping}[MatchAll@1:2[]]]", q.toString()); + assertEquals("NestedQuery@1:2[a.b.{f=true=null}[MatchAll@1:2[]]]", q.toString()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json index 635a4e62683bb..588dd60261252 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json +++ 
b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow.json @@ -11,6 +11,13 @@ "required": true, "description": "The name of the follower index" } + }, + "params": { + "wait_for_active_shards": { + "type" : "string", + "description" : "Sets the number of shard copies that must be active before returning. Defaults to 0. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)", + "default": "0" + } } }, "body": { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json index bb3220ece6b13..5406d2d06eceb 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json @@ -1,6 +1,6 @@ { "ml.set_upgrade_mode": { - "documentation": "TODO", + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-set-upgrade-mode.html", "methods": [ "POST" ], "url": { "path": "/_ml/set_upgrade_mode", diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml index 0a22a189fa7b6..9fa8e6259f5ff 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/sql/translate.yml @@ -1,9 +1,8 @@ --- "Translate SQL": - skip: - version: " - 6.3.99" - reason: format option was added in 6.4 - features: warnings + version: " - 6.99.99" + reason: Triggers warnings before 7.0 - do: bulk: @@ -29,7 +28,6 @@ excludes: [] docvalue_fields: - field: int - format: use_field_mapping sort: - int: order: asc diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash b/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash index dd41b93ea6b28..83f967c39891b 
100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/tests/certgen.bash @@ -417,3 +417,12 @@ DATA_SETTINGS echo "$testSearch" | grep '"_index":"books"' echo "$testSearch" | grep '"_id":"0"' } + +@test "[$GROUP] exit code on failure" { + run sudo -E -u $MASTER_USER "$MASTER_HOME/bin/elasticsearch-certgen" --not-a-valid-option + [ "$status" -ne 0 ] || { + echo "Expected elasticsearch-certgen tool exit code to be non-zero" + echo "$output" + false + } +}