diff --git a/.backportrc.json b/.backportrc.json index daa1fbef3a090..0edbd94ab8839 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -2,7 +2,7 @@ "upstream": "elastic/elasticsearch", "targetBranchChoices": [ { "name": "master", "checked": true }, - { "name": "7.x", "checked": true }, + "7.16", "7.15", "7.14", "6.8" @@ -10,7 +10,6 @@ "targetPRLabels": ["backport"], "branchLabelMapping": { "^v8.0.0$": "master", - "^v7.16.0$": "7.x", "^v(\\d+).(\\d+).\\d+$": "$1.$2" } } diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 7574dc2f03e30..8a71cd08f02f5 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -21,8 +21,10 @@ github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample cancel-builds-on-update: true - white-list-target-branches: - - master + black-list-target-branches: + - 7.16 + - 7.15 + - 6.8 excluded-regions: - ^docs/.* black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml index 3a9dfe6f74f75..025a02f013041 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml @@ -21,8 +21,10 @@ github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true - white-list-target-branches: - - master + black-list-target-branches: + - 7.16 + - 7.15 + - 6.8 excluded-regions: - ^docs/.* white-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml new file mode 100644 index 0000000000000..91eba35d76e14 --- /dev/null +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml @@ -0,0 +1,49 @@ +--- +- job: + name: "elastic+elasticsearch+pull-request+release-tests" + display-name: "elastic / elasticsearch - pull request release-tests" + description: "Testing of Elasticsearch pull requests - release-tests" + workspace: "/dev/shm/elastic+elasticsearch+pull-request+release-tests" + scm: + - git: + refspec: "+refs/pull/${ghprbPullId}/*:refs/remotes/origin/pr/${ghprbPullId}/*" + branches: + - "${ghprbActualCommit}" + triggers: + - github-pull-request: + org-list: + - elastic + allow-whitelist-orgs-as-admins: true + trigger-phrase: '.*run\W+elasticsearch-ci/release-tests.*' + github-hooks: true + status-context: elasticsearch-ci/release-tests + cancel-builds-on-update: true + excluded-regions: + - ^docs/.* + white-list-labels: + - 'test-release' + black-list-target-branches: + - 7.15 + - 6.8 + builders: + - inject: + properties-file: '.ci/java-versions.properties' + properties-content: | + JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA + RUNTIME_JAVA_HOME=$HOME/.java/$ES_RUNTIME_JAVA + JAVA8_HOME=$HOME/.java/java8 + JAVA11_HOME=$HOME/.java/java11 + JAVA15_HOME=$HOME/.java/openjdk15 + - shell: | + #!/usr/local/bin/runbld --redirect-stderr + + # Fetch beats artifacts + export ES_VERSION=$(grep 'elasticsearch' build-tools-internal/version.properties | awk '{print $3}') + export BEATS_DIR=$(pwd)/distribution/docker/build/artifacts/beats + + mkdir -p ${BEATS_DIR} + curl -o "${BEATS_DIR}/metricbeat-${ES_VERSION}-linux-x86_64.tar.gz" 
https://snapshots-no-kpi.elastic.co/downloads/beats/metricbeat/metricbeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + curl -o "${BEATS_DIR}/filebeat-${ES_VERSION}-linux-x86_64.tar.gz" https://snapshots-no-kpi.elastic.co/downloads/beats/filebeat/filebeat-${ES_VERSION}-SNAPSHOT-linux-x86_64.tar.gz + + $WORKSPACE/.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false \ + -Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef build diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index 591730cc00a43..4dbdb0090a20b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -18,8 +18,10 @@ github-hooks: true status-context: elasticsearch-ci/rest-compatibility cancel-builds-on-update: true - white-list-target-branches: - - master + black-list-target-branches: + - 7.16 + - 7.15 + - 6.8 excluded-regions: - ^docs/.* black-list-labels: diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java index 09d32d79a508c..79719b03059c8 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/FormattingPrecommitPlugin.java @@ -89,7 +89,6 @@ private Object[] getTargets(String projectPath) { return new String[] { "src/*/java/org/elasticsearch/action/admin/cluster/repositories/**/*.java", "src/*/java/org/elasticsearch/action/admin/cluster/snapshots/**/*.java", - "src/test/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java", "src/test/java/org/elasticsearch/common/xcontent/support/XContentMapValuesTests.java", "src/*/java/org/elasticsearch/index/IndexMode.java", "src/*/java/org/elasticsearch/index/IndexRouting.java", @@ -98,6 +97,10 @@ private Object[] getTargets(String projectPath) { "src/*/java/org/elasticsearch/repositories/**/*.java", "src/*/java/org/elasticsearch/search/aggregations/**/*.java", "src/*/java/org/elasticsearch/snapshots/**/*.java" }; + } else if (projectPath.equals(":test:framework")) { + return new String[] { + "src/test/java/org/elasticsearch/common/xcontent/support/AbstractFilteringTestCase.java", + }; } else { // Normally this isn"t necessary, but we have Java sources in // non-standard places @@ -115,6 +118,10 @@ private Object[] getTargets(String projectPath) { ":client:sniffer", ":client:test", ":distribution:archives:integ-test-zip", + ":distribution:bwc:bugfix", + ":distribution:bwc:maintenance", + ":distribution:bwc:minor", + ":distribution:bwc:staged", ":distribution:docker", ":docs", ":example-plugins:custom-settings", @@ -203,7 +210,6 @@ private Object[] getTargets(String projectPath) { ":test:fixtures:geoip-fixture", ":test:fixtures:krb5kdc-fixture", ":test:fixtures:old-elasticsearch", - ":test:framework", ":test:logger-usage", ":x-pack:docs", ":x-pack:license-tools", diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java 
b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java index 4f07187f7fc38..45b6b1d142963 100644 --- a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java +++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/conventions/precommit/LicenseHeadersTask.java @@ -36,7 +36,7 @@ import org.w3c.dom.Element; import org.w3c.dom.NodeList; import org.xml.sax.SAXException; - +import org.gradle.api.model.ObjectFactory; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; import java.io.BufferedWriter; @@ -51,6 +51,8 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import javax.inject.Inject; +import java.io.Serializable; /** * Checks files for license headers.. @@ -95,10 +97,6 @@ public List getExcludes() { return excludes; } - public Map getAdditionalLicenses() { - return additionalLicenses; - } - public void setExcludes(List excludes) { this.excludes = excludes; } @@ -106,6 +104,11 @@ public void setExcludes(List excludes) { @OutputFile private File reportFile = new File(getProject().getBuildDir(), "reports/licenseHeaders/rat.xml"); + private static List conventionalLicenses = Arrays.asList( + // Dual SSPLv1 and Elastic + new License("DUAL", "SSPL+Elastic License", "the Elastic License 2.0 or the Server") + ); + /** * Allowed license families for this project. */ @@ -118,13 +121,17 @@ public void setExcludes(List excludes) { */ @Input private List excludes = new ArrayList(); + + private ListProperty additionalLicenses; + /** * Additional license families that may be found. The key is the license category name (5 characters), * followed by the family name and the value list of patterns to search for. */ @Input - protected Map additionalLicenses = new HashMap(); - + public ListProperty getAdditionalLicenses() { + return additionalLicenses; + } /** * Add a new license type. *

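Note on the hunk below: additionalLicense(...) now appends a License value to the ListProperty-backed convention instead of writing into the old Map. A minimal, hypothetical build-script sketch of the resulting usage; the category/family/pattern values are the same ones exercised by LicenseHeadersPrecommitPluginFuncTest further down in this diff, and the five-character category rule is enforced by the task itself:

    // build.gradle (Groovy DSL); assumes the 'elasticsearch.internal-licenseheaders' plugin is applied
    tasks.named("licenseHeaders").configure {
        // the category name must be exactly 5 characters, e.g. 'ELAST'
        additionalLicense 'ELAST', 'Elastic License 2.0',
                '2.0; you may not use this file except in compliance with the Elastic License'
    }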
@@ -139,7 +146,12 @@ public void additionalLicense(final String categoryName, String familyName, Stri throw new IllegalArgumentException("License category name must be exactly 5 characters, got " + categoryName); } - additionalLicenses.put(categoryName + familyName, pattern); + additionalLicenses.add(new License(categoryName, familyName, pattern)); + } + + @Inject + public LicenseHeadersTask(ObjectFactory objectFactory) { + additionalLicenses = objectFactory.listProperty(License.class).convention(conventionalLicenses); } @TaskAction @@ -160,14 +172,10 @@ public void runRat() { matchers.add(subStringMatcher("GEN ", "Generated", "ANTLR GENERATED CODE")); // Vendored Code matchers.add(subStringMatcher("VEN ", "Vendored", "@notice")); - // Dual SSPLv1 and Elastic - matchers.add(subStringMatcher("DUAL", "SSPL+Elastic License", "the Elastic License 2.0 or the Server")); - for (Map.Entry additional : additionalLicenses.entrySet()) { - String category = additional.getKey().substring(0, 5); - String family = additional.getKey().substring(5); - matchers.add(subStringMatcher(category, family, additional.getValue())); - } + additionalLicenses.get().forEach(l -> + matchers.add(subStringMatcher(l.licenseFamilyCategory, l.licenseFamilyName, l.substringPattern)) + ); reportConfiguration.setHeaderMatcher(new HeaderMatcherMultiplexer(matchers.toArray(IHeaderMatcher[]::new))); reportConfiguration.setApprovedLicenseNames(approvedLicenses.stream().map(license -> { @@ -190,7 +198,6 @@ private IHeaderMatcher subStringMatcher(String licenseFamilyCategory, String lic SubstringLicenseMatcher substringLicenseMatcher = new SubstringLicenseMatcher(); substringLicenseMatcher.setLicenseFamilyCategory(licenseFamilyCategory); substringLicenseMatcher.setLicenseFamilyName(licenseFamilyName); - SubstringLicenseMatcher.Pattern pattern = new SubstringLicenseMatcher.Pattern(); pattern.setSubstring(substringPattern); substringLicenseMatcher.addConfiguredPattern(pattern); @@ -249,4 +256,16 @@ private static List elementList(NodeList resourcesNodes) { } return nodeList; } + + static class License implements Serializable { + private String licenseFamilyCategory; + private String licenseFamilyName; + private String substringPattern; + + public License(String licenseFamilyCategory, String licenseFamilyName, String substringPattern) { + this.licenseFamilyCategory = licenseFamilyCategory; + this.licenseFamilyName = licenseFamilyName; + this.substringPattern = substringPattern; + } + } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy index e24d3f1824e03..756562ab02725 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalBwcGitPluginFuncTest.groovy @@ -21,11 +21,11 @@ class InternalBwcGitPluginFuncTest extends AbstractGitAwareGradleFuncTest { bwcGitConfig { bwcVersion = project.provider { Version.fromString("7.9.1") } - bwcBranch = project.provider { "7.x" } + bwcBranch = project.provider { "7.9" } checkoutDir = project.provider{file("build/checkout")} } """ - execute("git branch origin/7.x", file("cloned")) + execute("git branch origin/7.9", file("cloned")) } def "current repository can be cloned"() { diff --git 
a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy index 7aa1adbc4865f..2d1a6193189d7 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionBwcSetupPluginFuncTest.groovy @@ -26,41 +26,15 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF buildFile << """ apply plugin: 'elasticsearch.internal-distribution-bwc-setup' """ - execute("git branch origin/7.x", file("cloned")) - execute("git branch origin/7.10", file("cloned")) - } - - def "builds distribution from branches via archives assemble"() { - given: - buildFile.text = "" - internalBuild(buildFile, "7.10.1", "7.11.0", "7.12.0") - buildFile << """ - apply plugin: 'elasticsearch.internal-distribution-bwc-setup' - """ - when: - def result = gradleRunner(":distribution:bwc:${bwcProject}:buildBwcDarwinTar", - ":distribution:bwc:${bwcProject}:buildBwcOssDarwinTar", - "-DtestRemoteRepo=" + remoteGitRepo, - "-Dbwc.remote=origin", - "-Dbwc.dist.version=${bwcDistVersion}-SNAPSHOT") - .build() - then: - result.task(":distribution:bwc:${bwcProject}:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS - result.task(":distribution:bwc:${bwcProject}:buildBwcOssDarwinTar").outcome == TaskOutcome.SUCCESS - - and: "assemble task triggered" - assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:darwin-tar:${expectedAssembleTaskName}") - assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:oss-darwin-tar:${expectedAssembleTaskName}") - - where: - bwcDistVersion | bwcProject | expectedAssembleTaskName - "7.10.1" | "bugfix" | "assemble" + execute("git branch origin/8.0", file("cloned")) + execute("git branch origin/7.16", file("cloned")) + execute("git branch origin/7.15", file("cloned")) } def "builds distribution from branches via archives extractedAssemble"() { given: buildFile.text = "" - internalBuild(buildFile, "7.12.1", "7.13.0", "7.14.0") + internalBuild() buildFile << """ apply plugin: 'elasticsearch.internal-distribution-bwc-setup' """ @@ -79,66 +53,29 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF where: bwcDistVersion | bwcProject | expectedAssembleTaskName - "7.14.0" | "minor" | "extractedAssemble" + "8.0.0" | "minor" | "extractedAssemble" + "7.16.0" | "staged" | "extractedAssemble" + "7.15.2" | "bugfix" | "extractedAssemble" } @Unroll def "supports #platform aarch distributions"() { when: def result = gradleRunner(":distribution:bwc:minor:buildBwc${platform.capitalize()}Aarch64Tar", - ":distribution:bwc:minor:buildBwcOss${platform.capitalize()}Aarch64Tar", "-DtestRemoteRepo=" + remoteGitRepo, "-Dbwc.remote=origin", "-Dbwc.dist.version=${bwcDistVersion}-SNAPSHOT") .build() then: result.task(":distribution:bwc:minor:buildBwc${platform.capitalize()}Aarch64Tar").outcome == TaskOutcome.SUCCESS - result.task(":distribution:bwc:minor:buildBwcOss${platform.capitalize()}Aarch64Tar").outcome == TaskOutcome.SUCCESS and: "assemble tasks triggered" assertOutputContains(result.output, "[$bwcDistVersion] > Task :distribution:archives:${platform}-aarch64-tar:extractedAssemble") - assertOutputContains(result.output, "[$bwcDistVersion] > Task 
:distribution:archives:oss-${platform}-aarch64-tar:extractedAssemble") where: bwcDistVersion | platform - "7.12.0" | "darwin" - "7.12.0" | "linux" - } - - def "bwc distribution archives can be resolved as bwc project artifact"() { - setup: - buildFile << """ - - configurations { - dists - } - - dependencies { - dists project(path: ":distribution:bwc:bugfix", configuration:"darwin-tar") - } - - tasks.register("resolveDistributionArchive") { - inputs.files(configurations.dists) - doLast { - configurations.dists.files.each { - println "distfile " + (it.absolutePath - project.rootDir.absolutePath) - } - } - } - """ - when: - def result = gradleRunner(":resolveDistributionArchive", - "-DtestRemoteRepo=" + remoteGitRepo, - "-Dbwc.remote=origin") - .build() - then: - result.task(":resolveDistributionArchive").outcome == TaskOutcome.SUCCESS - result.task(":distribution:bwc:bugfix:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS - - and: "assemble task triggered" - result.output.contains("[7.10.1] > Task :distribution:archives:darwin-tar:assemble") - result.output.contains("distfile /distribution/bwc/bugfix/build/bwc/checkout-7.10/distribution/archives/darwin-tar/" + - "build/distributions/elasticsearch-7.10.1-SNAPSHOT-darwin-x86_64.tar.gz") + "8.0.0" | "darwin" + "8.0.0" | "linux" } def "bwc expanded distribution folder can be resolved as bwc project artifact"() { @@ -174,10 +111,10 @@ class InternalDistributionBwcSetupPluginFuncTest extends AbstractGitAwareGradleF result.task(":resolveExpandedDistribution").outcome == TaskOutcome.SUCCESS result.task(":distribution:bwc:minor:buildBwcDarwinTar").outcome == TaskOutcome.SUCCESS and: "assemble task triggered" - result.output.contains("[7.12.0] > Task :distribution:archives:darwin-tar:extractedAssemble") - result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-7.x/" + + result.output.contains("[8.0.0] > Task :distribution:archives:darwin-tar:extractedAssemble") + result.output.contains("expandedRootPath /distribution/bwc/minor/build/bwc/checkout-8.0/" + "distribution/archives/darwin-tar/build/install") - result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-7.x/" + - "distribution/archives/darwin-tar/build/install/elasticsearch-7.12.0-SNAPSHOT") + result.output.contains("nested folder /distribution/bwc/minor/build/bwc/checkout-8.0/" + + "distribution/archives/darwin-tar/build/install/elasticsearch-8.0.0-SNAPSHOT") } } diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy index 24ebcd4d61c90..8d1a038331dca 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginFuncTest.groovy @@ -56,7 +56,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest elasticsearch_distributions { test_distro { - version = "7.12.0" + version = "8.0.0" type = "archive" platform = "linux" architecture = Architecture.current(); @@ -86,7 +86,7 @@ class InternalDistributionDownloadPluginFuncTest extends AbstractGradleFuncTest elasticsearch_distributions { test_distro { - version = "7.12.0" + version = "8.0.0" type = "archive" platform = "linux" architecture = Architecture.current(); diff 
--git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy index 0481d3315d010..67d2b96fb7b8f 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/precommit/LicenseHeadersPrecommitPluginFuncTest.groovy @@ -59,6 +59,45 @@ class LicenseHeadersPrecommitPluginFuncTest extends AbstractGradleFuncTest { result.task(":licenseHeaders").outcome == TaskOutcome.SUCCESS } + def "supports sspl by convention"() { + given: + buildFile << """ + plugins { + id 'java' + id 'elasticsearch.internal-licenseheaders' + } + """ + dualLicensedFile() + + when: + def result = gradleRunner("licenseHeaders").build() + + then: + result.task(":licenseHeaders").outcome == TaskOutcome.SUCCESS + } + + def "sspl default additional license can be overridden"() { + given: + buildFile << """ + plugins { + id 'java' + id 'elasticsearch.internal-licenseheaders' + } + + tasks.named("licenseHeaders").configure { + additionalLicense 'ELAST', 'Elastic License 2.0', '2.0; you may not use this file except in compliance with the Elastic License' + } + """ + elasticLicensed() + dualLicensedFile() + + when: + def result = gradleRunner("licenseHeaders").buildAndFail() + + then: + result.task(":licenseHeaders").outcome == TaskOutcome.FAILED + } + private File unapprovedSourceFile(String filePath = "src/main/java/org/acme/UnapprovedLicensed.java") { File sourceFile = file(filePath); sourceFile << """ @@ -115,6 +154,21 @@ class LicenseHeadersPrecommitPluginFuncTest extends AbstractGradleFuncTest { """ } + private File elasticLicensed() { + file("src/main/java/org/acme/ElasticLicensed.java") << """ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + + package org.acme; + public class ElasticLicensed { + } + """ + } + private String packageString(File sourceFile) { String normalizedPath = normalized(sourceFile.getPath()) (normalizedPath.substring(normalizedPath.indexOf("src/main/java")) - "src/main/java/" - ("/" + sourceFile.getName())).replaceAll("/", ".") diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy index b75cb393a2c61..7c7dd533a9a18 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/test/rest/YamlRestCompatTestPluginFuncTest.groovy @@ -30,7 +30,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTestVxCompatTest does nothing when there are no tests"() { given: - addSubProject(":distribution:bwc:minor") << """ + addSubProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -53,11 +53,11 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { result.task(transformTask).outcome == TaskOutcome.NO_SOURCE } - def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:minor"() { + def "yamlRestTestVxCompatTest executes and copies api and transforms tests from :bwc:staged"() { given: internalBuild() - addSubProject(":distribution:bwc:minor") << """ + addSubProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -90,8 +90,8 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { String api = "foo.json" String test = "10_basic.yml" //add the compatible test and api files, these are the prior version's normal yaml rest tests - file("distribution/bwc/minor/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" - file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" + file("distribution/bwc/staged/checkoutDir/rest-api-spec/src/main/resources/rest-api-spec/api/" + api) << "" + file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/" + test) << "" when: def result = gradleRunner("yamlRestTestV${compatibleVersion}CompatTest").build() @@ -136,7 +136,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { def "yamlRestTestVxCompatTest is wired into check and checkRestCompat"() { given: - addSubProject(":distribution:bwc:minor") << """ + addSubProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -180,7 +180,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { given: internalBuild() - addSubProject(":distribution:bwc:minor") << """ + addSubProject(":distribution:bwc:staged") << """ configurations { checkout } artifacts { checkout(new File(projectDir, "checkoutDir")) @@ -224,7 +224,7 @@ class YamlRestCompatTestPluginFuncTest extends AbstractRestResourcesFuncTest { setupRestResources([], []) - file("distribution/bwc/minor/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ + 
file("distribution/bwc/staged/checkoutDir/src/yamlRestTest/resources/rest-api-spec/test/test.yml" ) << """ "one": - do: do_.some.key_to_replace: diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java index 55be56626bb7c..f936913c79375 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/BwcVersions.java @@ -10,16 +10,17 @@ import org.elasticsearch.gradle.Architecture; import org.elasticsearch.gradle.Version; import org.elasticsearch.gradle.VersionProperties; +import org.jetbrains.annotations.NotNull; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.HashMap; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; -import java.util.SortedSet; +import java.util.TreeMap; import java.util.TreeSet; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -27,9 +28,7 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; -import java.util.stream.Stream; -import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; /** @@ -40,28 +39,14 @@ * On top of this, figure out which of these are unreleased and provide the branch they can be built from. *

* Note that in this context, currentVersion is the unreleased version this build operates on. - * At any point in time there will surely be four such unreleased versions being worked on, - * thus currentVersion will be one of these. + * At any point in time there will be at least three such versions and potentially four in the case of a staged release. *

- * Considering:
- * <dl>
- *     <dt>M, M > 0</dt>
- *     <dd>last released major</dd>
- *     <dt>N, N > 0</dt>
- *     <dd>last released minor</dd>
- * </dl>
- *
 * <ul>
- * <li>the unreleased major, M+1.0.0 on the `master` branch</li>
- * <li>the unreleased minor, M.N.0 on the `M.x` (x is literal) branch</li>
- * <li>the unreleased bugfix, M.N.c (c > 0) on the `M.N` branch</li>
+ * <li>the current version on the `master` branch</li>
+ * <li>the staged next minor on the `M.N` branch</li>
+ * <li>the unreleased bugfix, `M.N-1` branch</li>
 * <li>the unreleased maintenance, M-1.d.e ( d > 0, e > 0) on the `(M-1).d` branch</li>
 * </ul>
- * In addition to these, there will be a fifth one when a minor reaches feature freeze, we call this the staged
- * version:
- * <ul>
- * <li>the unreleased staged, M.N-2.0 (N > 2) on the `M.(N-2)` branch</li>
- * </ul>
 *
* Each build is only concerned with versions before it, as those are the ones that need to be tested * for backwards compatibility. We never look forward, and don't add forward facing version number to branches of previous @@ -71,7 +56,7 @@ * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased * version number to server in all branches when a version is released. * E.x when M.N.c is released M.N.c+1 is added to the Version class mentioned above in all the following branches: - * `M.N`, `M.x` and `master` so we can reliably assume that the leafs of the version tree are unreleased. + * `M.N`, and `master` so we can reliably assume that the leafs of the version tree are unreleased. * This convention is enforced by checking the versions we consider to be unreleased against an * authoritative source (maven central). * We are then able to map the unreleased version to branches in git and Gradle projects that are capable of checking @@ -80,87 +65,55 @@ public class BwcVersions { private static final Pattern LINE_PATTERN = Pattern.compile( - "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*" + "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*?LUCENE_(\\d+)_(\\d+)_(\\d+)\\);" ); + private static final Version MINIMUM_WIRE_COMPATIBLE_VERSION = Version.fromString("7.16.0"); - private final Version currentVersion; - private final Map> groupByMajor; + private final VersionPair currentVersion; + private final List versions; private final Map unreleased; - public class UnreleasedVersionInfo { - public final Version version; - public final String branch; - public final String gradleProjectPath; - - UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) { - this.version = version; - this.branch = branch; - this.gradleProjectPath = gradleProjectPath; - } - } - public BwcVersions(List versionLines) { this(versionLines, Version.fromString(VersionProperties.getElasticsearch())); } - protected BwcVersions(List versionLines, Version currentVersionProperty) { - this( - versionLines.stream() - .map(LINE_PATTERN::matcher) - .filter(Matcher::matches) - .map( - match -> new Version( - Integer.parseInt(match.group(1)), - Integer.parseInt(match.group(2)), - Integer.parseInt(match.group(3)) - ) - ) - .collect(Collectors.toCollection(TreeSet::new)), - currentVersionProperty - ); - } - - // for testkit tests, until BwcVersions is extracted into an extension - public BwcVersions(SortedSet allVersions, Version currentVersionProperty) { + public BwcVersions(Version currentVersionProperty, List allVersions) { if (allVersions.isEmpty()) { throw new IllegalArgumentException("Could not parse any versions"); } - currentVersion = allVersions.last(); - - groupByMajor = allVersions.stream() - // We only care about the last 2 majors when it comes to BWC. - // It might take us time to remove the older ones from versionLines, so we allow them to exist. 
- .filter(version -> version.getMajor() > currentVersion.getMajor() - 2) - .collect(Collectors.groupingBy(Version::getMajor, Collectors.toList())); - + this.versions = allVersions; + this.currentVersion = allVersions.get(allVersions.size() - 1); assertCurrentVersionMatchesParsed(currentVersionProperty); - assertNoOlderThanTwoMajors(); + this.unreleased = computeUnreleased(); + } - Map unreleased = new HashMap<>(); - for (Version unreleasedVersion : getUnreleased()) { - unreleased.put( - unreleasedVersion, - new UnreleasedVersionInfo(unreleasedVersion, getBranchFor(unreleasedVersion), getGradleProjectPathFor(unreleasedVersion)) - ); - } - this.unreleased = Collections.unmodifiableMap(unreleased); + // Visible for testing + BwcVersions(List versionLines, Version currentVersionProperty) { + this(currentVersionProperty, parseVersionLines(versionLines)); } - private void assertNoOlderThanTwoMajors() { - Set majors = groupByMajor.keySet(); - if (majors.size() != 2 && currentVersion.getMinor() != 0 && currentVersion.getRevision() != 0) { - throw new IllegalStateException("Expected exactly 2 majors in parsed versions but found: " + majors); - } + private static List parseVersionLines(List versionLines) { + return versionLines.stream() + .map(LINE_PATTERN::matcher) + .filter(Matcher::matches) + .map( + match -> new VersionPair( + new Version(Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)), Integer.parseInt(match.group(3))), + new Version(Integer.parseInt(match.group(5)), Integer.parseInt(match.group(6)), Integer.parseInt(match.group(7))) + ) + ) + .sorted() + .toList(); } private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) { - if (currentVersionProperty.equals(currentVersion) == false) { + if (currentVersionProperty.equals(currentVersion.elasticsearch) == false) { throw new IllegalStateException( "Parsed versions latest version does not match the one configured in build properties. 
" + "Parsed latest version is " - + currentVersion + + currentVersion.elasticsearch + " but the build has " + currentVersionProperty ); @@ -175,130 +128,78 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { } public void forPreviousUnreleased(Consumer consumer) { - List collect = filterSupportedVersions( - getUnreleased().stream().filter(version -> version.equals(currentVersion) == false).collect(Collectors.toList()) - ).stream() - .map(version -> new UnreleasedVersionInfo(version, getBranchFor(version), getGradleProjectPathFor(version))) - .collect(Collectors.toList()); - - collect.forEach(consumer::accept); - } - - private String getGradleProjectPathFor(Version version) { - // We have Gradle projects set up to check out and build unreleased versions based on the our branching - // conventions described in this classes javadoc - if (version.equals(currentVersion)) { - return ":distribution"; - } - - Map> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor(); - - if (version.getRevision() == 0) { - List unreleasedStagedOrMinor = getUnreleased().stream().filter(v -> v.getRevision() == 0).collect(Collectors.toList()); - if (unreleasedStagedOrMinor.size() > 2) { - if (unreleasedStagedOrMinor.get(unreleasedStagedOrMinor.size() - 2).equals(version)) { - return ":distribution:bwc:minor"; - } else { - return ":distribution:bwc:staged"; - } - } else { - return ":distribution:bwc:minor"; - } - } else { - if (releasedMajorGroupedByMinor.getOrDefault(version.getMinor(), emptyList()).contains(version)) { - return ":distribution:bwc:bugfix"; - } else { - return ":distribution:bwc:maintenance"; - } - } + filterSupportedVersions( + getUnreleased().stream().filter(version -> version.equals(currentVersion.elasticsearch) == false).collect(Collectors.toList()) + ).stream().map(unreleased::get).forEach(consumer); } private String getBranchFor(Version version) { - // based on the rules described in this classes javadoc, figure out the branch on which an unreleased version - // lives. - // We do this based on the Gradle project path because there's a direct correlation, so we dont have to duplicate - // the logic from there - switch (getGradleProjectPathFor(version)) { - case ":distribution": - return "master"; - case ":distribution:bwc:minor": - // The .x branch will always point to the latest minor (for that major), so a "minor" project will be on the .x branch - // unless there is more recent (higher) minor. - final Version latestInMajor = getLatestVersionByKey(groupByMajor, version.getMajor()); - if (latestInMajor.getMinor() == version.getMinor()) { - return version.getMajor() + ".x"; - } else { - return version.getMajor() + "." + version.getMinor(); - } - case ":distribution:bwc:staged": - case ":distribution:bwc:maintenance": - case ":distribution:bwc:bugfix": - return version.getMajor() + "." + version.getMinor(); - default: - throw new IllegalStateException("Unexpected Gradle project name"); + if (version.equals(currentVersion.elasticsearch)) { + // Just assume the current branch is 'master'. It's actually not important, we never check out the current branch. + return "master"; + } else { + return version.getMajor() + "." 
+ version.getMinor(); } } - public List getUnreleased() { - List unreleased = new ArrayList<>(); + private Map computeUnreleased() { + Set unreleased = new TreeSet<>(); // The current version is being worked, is always unreleased unreleased.add(currentVersion); - - // the tip of the previous major is unreleased for sure, be it a minor or a bugfix - final Version latestOfPreviousMajor = getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1); - unreleased.add(latestOfPreviousMajor); - if (latestOfPreviousMajor.getRevision() == 0) { - // if the previous major is a x.y.0 release, then the tip of the minor before that (y-1) is also unreleased - final Version previousMinor = getLatestInMinor(latestOfPreviousMajor.getMajor(), latestOfPreviousMajor.getMinor() - 1); - if (previousMinor != null) { - unreleased.add(previousMinor); - } - } - - final Map> groupByMinor = getReleasedMajorGroupedByMinor(); - int greatestMinor = groupByMinor.keySet().stream().max(Integer::compareTo).orElse(0); - - // the last bugfix for this minor series is always unreleased - unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor)); - - if (groupByMinor.get(greatestMinor).size() == 1) { - // we found an unreleased minor - unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1)); - if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) { - // we found that the previous minor is staged but not yet released - // in this case, the minor before that has a bugfix, should there be such a minor - if (greatestMinor >= 2) { - unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + // Recurse for all unreleased versions starting from the current version + addUnreleased(unreleased, currentVersion, 0); + + // Grab the latest version from the previous major if necessary as well, this is going to be a maintenance release + VersionPair maintenance = versions.stream() + .filter(v -> v.elasticsearch.getMajor() == currentVersion.elasticsearch.getMajor() - 1) + .sorted(Comparator.reverseOrder()) + .findFirst() + .orElseThrow(); + // This is considered the maintenance release only if we haven't yet encountered it + boolean hasMaintenanceRelease = unreleased.add(maintenance); + + List unreleasedList = unreleased.stream().sorted(Comparator.reverseOrder()).toList(); + Map result = new TreeMap<>(); + for (int i = 0; i < unreleasedList.size(); i++) { + Version esVersion = unreleasedList.get(i).elasticsearch; + // This is either a new minor or staged release + if (currentVersion.elasticsearch.equals(esVersion)) { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution")); + } else if (esVersion.getRevision() == 0) { + // If there are two upcoming unreleased minors then this one is the new minor + if (unreleasedList.get(i + 1).elasticsearch.getRevision() == 0) { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:minor")); + } else { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:staged")); + } + } else { + // If this is the oldest unreleased version and we have a maintenance release + if (i == unreleasedList.size() - 1 && hasMaintenanceRelease) { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:maintenance")); + } else { + result.put(esVersion, new UnreleasedVersionInfo(esVersion, getBranchFor(esVersion), ":distribution:bwc:bugfix")); } } } - return 
unmodifiableList(unreleased.stream().sorted().distinct().collect(Collectors.toList())); + return Collections.unmodifiableMap(result); } - private Version getLatestInMinor(int major, int minor) { - return groupByMajor.get(major).stream().filter(v -> v.getMinor() == minor).max(Version::compareTo).orElse(null); - } - - private Version getLatestVersionByKey(Map> groupByMajor, int key) { - return groupByMajor.getOrDefault(key, emptyList()) - .stream() - .max(Version::compareTo) - .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection")); + public List getUnreleased() { + return unreleased.keySet().stream().sorted().toList(); } - private Map> getReleasedMajorGroupedByMinor() { - List currentMajorVersions = groupByMajor.get(currentVersion.getMajor()); - List previousMajorVersions = groupByMajor.get(currentVersion.getMajor() - 1); + private void addUnreleased(Set unreleased, VersionPair current, int index) { + if (current.elasticsearch.getRevision() == 0) { + // If the current version is a new minor, the next version is also unreleased + VersionPair next = versions.get(versions.size() - (index + 2)); + unreleased.add(next); - final Map> groupByMinor; - if (currentMajorVersions.size() == 1) { - // Current is an unreleased major: x.0.0 so we have to look for other unreleased versions in the previous major - groupByMinor = previousMajorVersions.stream().collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); + // Keep looking through versions until we find the end of unreleased versions + addUnreleased(unreleased, next, index + 1); } else { - groupByMinor = currentMajorVersions.stream().collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); + unreleased.add(current); } - return groupByMinor; } public void compareToAuthoritative(List authoritativeReleasedVersions) { @@ -326,20 +227,13 @@ public void compareToAuthoritative(List authoritativeReleasedVersions) } private List getReleased() { - List unreleased = getUnreleased(); - return groupByMajor.values() - .stream() - .flatMap(Collection::stream) - .filter(each -> unreleased.contains(each) == false) - .collect(Collectors.toList()); + return versions.stream().map(v -> v.elasticsearch).filter(v -> unreleased.containsKey(v) == false).toList(); } public List getIndexCompatible() { - var indexCompatibles = Stream.concat( - groupByMajor.get(currentVersion.getMajor() - 1).stream(), - groupByMajor.get(currentVersion.getMajor()).stream() - ).collect(Collectors.toList()); - return unmodifiableList(filterSupportedVersions(indexCompatibles)); + return filterSupportedVersions( + versions.stream().filter(v -> v.lucene.getMajor() >= (currentVersion.lucene.getMajor() - 1)).map(v -> v.elasticsearch).toList() + ); } public void withIndexCompatiple(BiConsumer versionAction) { @@ -351,16 +245,9 @@ public void withIndexCompatiple(Predicate filter, BiConsumer getWireCompatible() { - List wireCompat = new ArrayList<>(); - List prevMajors = groupByMajor.get(currentVersion.getMajor() - 1); - int minor = prevMajors.get(prevMajors.size() - 1).getMinor(); - for (int i = prevMajors.size() - 1; i > 0 && prevMajors.get(i).getMinor() == minor; i--) { - wireCompat.add(prevMajors.get(i)); - } - wireCompat.addAll(groupByMajor.get(currentVersion.getMajor())); - wireCompat.sort(Version::compareTo); - - return unmodifiableList(filterSupportedVersions(wireCompat)); + return filterSupportedVersions( + versions.stream().map(v -> v.elasticsearch).filter(v -> v.compareTo(MINIMUM_WIRE_COMPATIBLE_VERSION) >= 0).toList() + ); } 
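The two filters above replace the old groupByMajor bookkeeping: index compatibility is now derived from the Lucene version carried on each VersionPair (at most one Lucene major behind the current build), while wire compatibility is a fixed floor at MINIMUM_WIRE_COMPATIBLE_VERSION (7.16.0). A small self-contained Groovy sketch of both rules, using (Elasticsearch, Lucene) pairs taken from the BwcVersionsSpec cases later in this diff; the map-based stand-in for VersionPair is hypothetical:

    // Sample (es, luceneMajor) pairs; the current build is 8.0.0 on Lucene 9.0.0.
    def versions = [
        [es: '7.15.2', luceneMajor: 8],
        [es: '7.16.0', luceneMajor: 8],
        [es: '8.0.0', luceneMajor: 9]
    ]
    int currentLuceneMajor = 9

    // Index compatible: Lucene major no more than one behind the current one.
    def indexCompatible = versions.findAll { it.luceneMajor >= currentLuceneMajor - 1 }*.es

    // Wire compatible: everything at or above the fixed 7.16.0 floor
    // (plain string comparison is good enough for these sample values).
    def wireCompatible = versions.findAll { it.es >= '7.16.0' }*.es

    assert indexCompatible == ['7.15.2', '7.16.0', '8.0.0']
    assert wireCompatible == ['7.16.0', '8.0.0']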
public void withWireCompatiple(BiConsumer versionAction) { @@ -389,4 +276,44 @@ public List getUnreleasedWireCompatible() { return unmodifiableList(unreleasedWireCompatible); } + public static class UnreleasedVersionInfo { + public final Version version; + public final String branch; + public final String gradleProjectPath; + + public UnreleasedVersionInfo(Version version, String branch, String gradleProjectPath) { + this.version = version; + this.branch = branch; + this.gradleProjectPath = gradleProjectPath; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UnreleasedVersionInfo that = (UnreleasedVersionInfo) o; + return version.equals(that.version) && branch.equals(that.branch) && gradleProjectPath.equals(that.gradleProjectPath); + } + + @Override + public int hashCode() { + return Objects.hash(version, branch, gradleProjectPath); + } + } + + public static class VersionPair implements Comparable { + public final Version elasticsearch; + public final Version lucene; + + public VersionPair(Version elasticsearch, Version lucene) { + this.elasticsearch = elasticsearch; + this.lucene = lucene; + } + + @Override + public int compareTo(@NotNull VersionPair o) { + // For ordering purposes, sort by Elasticsearch version + return this.elasticsearch.compareTo(o.elasticsearch); + } + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java index 3077bcd70e10a..97b2776ee79cf 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/info/GlobalBuildInfoPlugin.java @@ -46,6 +46,7 @@ import java.util.Locale; import java.util.Map; import java.util.Random; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -108,7 +109,8 @@ public void apply(Project project) { params.setDefaultParallel(ParallelDetector.findDefaultParallel(project)); params.setInFipsJvm(Util.getBooleanProperty("tests.fips.enabled", false)); params.setIsSnapshotBuild(Util.getBooleanProperty("build.snapshot", true)); - params.setBwcVersions(providers.provider(() -> resolveBwcVersions(rootDir))); + AtomicReference cache = new AtomicReference<>(); + params.setBwcVersions(providers.provider(() -> cache.updateAndGet(val -> val == null ? 
resolveBwcVersions(rootDir) : val))); }); // Enforce the minimum compiler version diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java index 4f75f5cd297f6..14249b594556d 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/rest/compat/YamlRestCompatTestPlugin.java @@ -81,7 +81,7 @@ public void apply(Project project) { // copy compatible rest specs Configuration bwcMinorConfig = project.getConfigurations().create(BWC_MINOR_CONFIG_NAME); - Dependency bwcMinor = project.getDependencies().project(Map.of("path", ":distribution:bwc:minor", "configuration", "checkout")); + Dependency bwcMinor = project.getDependencies().project(Map.of("path", ":distribution:bwc:staged", "configuration", "checkout")); project.getDependencies().add(bwcMinorConfig.getName(), bwcMinor); Provider copyCompatYamlSpecTask = project.getTasks() diff --git a/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy new file mode 100644 index 0000000000000..9f12e9b8388a8 --- /dev/null +++ b/build-tools-internal/src/test/groovy/org/elasticsearch/gradle/internal/BwcVersionsSpec.groovy @@ -0,0 +1,187 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal + +import spock.lang.Specification + +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.BwcVersions.UnreleasedVersionInfo + + +class BwcVersionsSpec extends Specification { + List versionLines = [] + + def "current version is next major with last minor staged"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.15.2')): new UnreleasedVersionInfo(v('7.15.2'), '7.15', ':distribution:bwc:bugfix'), + (v('7.16.0')): new UnreleasedVersionInfo(v('7.16.0'), '7.16', ':distribution:bwc:staged'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'master', ':distribution') + ] + bwc.wireCompatible == [v('7.16.0'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('8.0.0')] + } + + def "current version is next minor with next major and last minor both staged"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('8.0.0', '9.0.0') + addVersion('8.1.0', '9.1.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.1.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.15.2')): new UnreleasedVersionInfo(v('7.15.2'), '7.15', ':distribution:bwc:bugfix'), + (v('7.16.0')): new UnreleasedVersionInfo(v('7.16.0'), '7.16', ':distribution:bwc:staged'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:minor'), + (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'master', ':distribution') + ] + bwc.wireCompatible == [v('7.16.0'), v('8.0.0'), v('8.1.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('8.0.0'), v('8.1.0')] + } + + def "current is next minor with upcoming minor staged"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('8.0.0', '9.0.0') + addVersion('8.1.0', '9.1.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.1.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), '8.0', ':distribution:bwc:staged'), + (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'master', ':distribution') + ] + bwc.wireCompatible == [v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.1.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.1.0')] + } + + def "current version is staged major"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + 
addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('8.0.0', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:bugfix'), + (v('8.0.0')): new UnreleasedVersionInfo(v('8.0.0'), 'master', ':distribution'), + ] + bwc.wireCompatible == [v('7.16.0'), v('7.16.1'), v('8.0.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('8.0.0')] + } + + def "current version is next bugfix"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('8.0.0', '9.0.0') + addVersion('8.0.1', '9.0.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.0.1')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:maintenance'), + (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), 'master', ':distribution'), + ] + bwc.wireCompatible == [v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.0.1')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.0.1')] + } + + def "current version is next minor with no staged releases"() { + given: + addVersion('7.14.0', '8.9.0') + addVersion('7.14.1', '8.9.0') + addVersion('7.14.2', '8.9.0') + addVersion('7.15.0', '8.9.0') + addVersion('7.15.1', '8.9.0') + addVersion('7.15.2', '8.9.0') + addVersion('7.16.0', '8.10.0') + addVersion('7.16.1', '8.10.0') + addVersion('8.0.0', '9.0.0') + addVersion('8.0.1', '9.0.0') + addVersion('8.1.0', '9.1.0') + + when: + def bwc = new BwcVersions(versionLines, v('8.1.0')) + def unreleased = bwc.unreleased.collectEntries { [it, bwc.unreleasedInfo(it)] } + + then: + unreleased == [ + (v('7.16.1')): new UnreleasedVersionInfo(v('7.16.1'), '7.16', ':distribution:bwc:maintenance'), + (v('8.0.1')): new UnreleasedVersionInfo(v('8.0.1'), '8.0', ':distribution:bwc:bugfix'), + (v('8.1.0')): new UnreleasedVersionInfo(v('8.1.0'), 'master', ':distribution') + ] + bwc.wireCompatible == [v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] + bwc.indexCompatible == [v('7.14.0'), v('7.14.1'), v('7.14.2'), v('7.15.0'), v('7.15.1'), v('7.15.2'), v('7.16.0'), v('7.16.1'), v('8.0.0'), v('8.0.1'), v('8.1.0')] + } + + private void addVersion(String elasticsearch, String lucene) { + def es = Version.fromString(elasticsearch) + def l = Version.fromString(lucene) + versionLines << " public static final Version V_${es.major}_${es.minor}_${es.revision} = new Version(0000000, org.apache.lucene.util.Version.LUCENE_${l.major}_${l.minor}_${l.revision});".toString() + } + + private Version v(String version) { + return Version.fromString(version) + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java index 
64798ec5ccab5..64b5268d451cd 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/AbstractDistributionDownloadPluginTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.gradle; import org.elasticsearch.gradle.internal.BwcVersions; +import org.elasticsearch.gradle.internal.BwcVersions.VersionPair; import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.gradle.api.NamedDomainObjectContainer; import org.gradle.api.Project; @@ -16,7 +17,6 @@ import java.io.File; import java.util.Arrays; -import java.util.TreeSet; public class AbstractDistributionDownloadPluginTests extends GradleUnitTestCase { protected static Project rootProject; @@ -24,27 +24,27 @@ public class AbstractDistributionDownloadPluginTests extends GradleUnitTestCase protected static Project packagesProject; protected static Project bwcProject; - protected static final Version BWC_MAJOR_VERSION = Version.fromString("2.0.0"); - protected static final Version BWC_MINOR_VERSION = Version.fromString("1.1.0"); - protected static final Version BWC_STAGED_VERSION = Version.fromString("1.0.0"); - protected static final Version BWC_BUGFIX_VERSION = Version.fromString("1.0.1"); - protected static final Version BWC_MAINTENANCE_VERSION = Version.fromString("0.90.1"); + protected static final VersionPair BWC_MAJOR_VERSION = new VersionPair(Version.fromString("2.0.0"), Version.fromString("3.0.0")); + protected static final VersionPair BWC_MINOR_VERSION = new VersionPair(Version.fromString("1.1.0"), Version.fromString("2.1.0")); + protected static final VersionPair BWC_STAGED_VERSION = new VersionPair(Version.fromString("1.0.0"), Version.fromString("2.0.0")); + protected static final VersionPair BWC_BUGFIX_VERSION = new VersionPair(Version.fromString("1.0.1"), Version.fromString("2.0.0")); + protected static final VersionPair BWC_MAINTENANCE_VERSION = new VersionPair(Version.fromString("0.90.1"), Version.fromString("1.1.3")); protected static final BwcVersions BWC_MINOR = new BwcVersions( - new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), - BWC_MAJOR_VERSION + BWC_MAJOR_VERSION.elasticsearch, + Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION) ); protected static final BwcVersions BWC_STAGED = new BwcVersions( - new TreeSet<>(Arrays.asList(BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), - BWC_MAJOR_VERSION + BWC_MAJOR_VERSION.elasticsearch, + Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION) ); protected static final BwcVersions BWC_BUGFIX = new BwcVersions( - new TreeSet<>(Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION)), - BWC_MAJOR_VERSION + BWC_MAJOR_VERSION.elasticsearch, + Arrays.asList(BWC_BUGFIX_VERSION, BWC_MINOR_VERSION, BWC_MAJOR_VERSION) ); protected static final BwcVersions BWC_MAINTENANCE = new BwcVersions( - new TreeSet<>(Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_STAGED_VERSION, BWC_MINOR_VERSION)), - BWC_MINOR_VERSION + BWC_MINOR_VERSION.elasticsearch, + Arrays.asList(BWC_MAINTENANCE_VERSION, BWC_BUGFIX_VERSION, BWC_MINOR_VERSION) ); protected static String projectName(String base, boolean bundledJdk) { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java index 
7da15e42a03c3..e66ccab543a3e 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/DistributionDownloadPluginTests.java @@ -132,10 +132,10 @@ public void testLocalBwcArchives() { String configName = projectName(platform.toString(), true); configName += (platform == Platform.WINDOWS ? "-zip" : "-tar"); ElasticsearchDistributionType archiveType = ElasticsearchDistributionTypes.ARCHIVE; - checkBwc("minor", configName, BWC_MINOR_VERSION, archiveType, platform, BWC_MINOR); - checkBwc("staged", configName, BWC_STAGED_VERSION, archiveType, platform, BWC_STAGED); - checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, archiveType, platform, BWC_BUGFIX); - checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, archiveType, platform, BWC_MAINTENANCE); + checkBwc("minor", configName, BWC_MINOR_VERSION.elasticsearch, archiveType, platform, BWC_MINOR); + checkBwc("staged", configName, BWC_STAGED_VERSION.elasticsearch, archiveType, platform, BWC_STAGED); + checkBwc("bugfix", configName, BWC_BUGFIX_VERSION.elasticsearch, archiveType, platform, BWC_BUGFIX); + checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION.elasticsearch, archiveType, platform, BWC_MAINTENANCE); } } diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/BwcVersionsTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/BwcVersionsTests.java deleted file mode 100644 index ca6a530eef3b8..0000000000000 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/BwcVersionsTests.java +++ /dev/null @@ -1,774 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.gradle.internal; - -import org.elasticsearch.gradle.Architecture; -import org.elasticsearch.gradle.Version; -import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; -import org.junit.Assume; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.util.Arrays.asList; -import static java.util.Collections.singletonList; - -public class BwcVersionsTests extends GradleUnitTestCase { - - private static final Map> sampleVersions = new HashMap<>(); - - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - - static { - // unreleased major and two unreleased minors ( minor in feature freeze ) - sampleVersions.put("8.0.0", asList("7_0_0", "7_0_1", "7_1_0", "7_1_1", "7_2_0", "7_3_0", "8.0.0")); - sampleVersions.put( - "7.0.0-alpha1", - asList( - "6_0_0_alpha1", - "6_0_0_alpha2", - "6_0_0_beta1", - "6_0_0_beta2", - "6_0_0_rc1", - "6_0_0_rc2", - "6_0_0", - "6_0_1", - "6_1_0", - "6_1_1", - "6_1_2", - "6_1_3", - "6_1_4", - "6_2_0", - "6_2_1", - "6_2_2", - "6_2_3", - "6_2_4", - "6_3_0", - "6_3_1", - "6_3_2", - "6_4_0", - "6_4_1", - "6_4_2", - "6_5_0", - "7_0_0_alpha1" - ) - ); - sampleVersions.put( - "6.5.0", - asList( - "5_0_0_alpha1", - "5_0_0_alpha2", - "5_0_0_alpha3", - "5_0_0_alpha4", - "5_0_0_alpha5", - "5_0_0_beta1", - "5_0_0_rc1", - "5_0_0", - "5_0_1", - "5_0_2", - "5_1_1", - "5_1_2", - "5_2_0", - "5_2_1", - "5_2_2", - "5_3_0", - "5_3_1", - "5_3_2", - "5_3_3", - "5_4_0", - "5_4_1", - "5_4_2", - "5_4_3", - "5_5_0", - "5_5_1", - "5_5_2", - "5_5_3", - "5_6_0", - "5_6_1", - "5_6_2", - "5_6_3", - "5_6_4", - "5_6_5", - "5_6_6", - "5_6_7", - "5_6_8", - "5_6_9", - "5_6_10", - "5_6_11", - "5_6_12", - "5_6_13", - "6_0_0_alpha1", - "6_0_0_alpha2", - "6_0_0_beta1", - "6_0_0_beta2", - "6_0_0_rc1", - "6_0_0_rc2", - "6_0_0", - "6_0_1", - "6_1_0", - "6_1_1", - "6_1_2", - "6_1_3", - "6_1_4", - "6_2_0", - "6_2_1", - "6_2_2", - "6_2_3", - "6_2_4", - "6_3_0", - "6_3_1", - "6_3_2", - "6_4_0", - "6_4_1", - "6_4_2", - "6_5_0" - ) - ); - sampleVersions.put( - "6.6.0", - asList( - "5_0_0_alpha1", - "5_0_0_alpha2", - "5_0_0_alpha3", - "5_0_0_alpha4", - "5_0_0_alpha5", - "5_0_0_beta1", - "5_0_0_rc1", - "5_0_0", - "5_0_1", - "5_0_2", - "5_1_1", - "5_1_2", - "5_2_0", - "5_2_1", - "5_2_2", - "5_3_0", - "5_3_1", - "5_3_2", - "5_3_3", - "5_4_0", - "5_4_1", - "5_4_2", - "5_4_3", - "5_5_0", - "5_5_1", - "5_5_2", - "5_5_3", - "5_6_0", - "5_6_1", - "5_6_2", - "5_6_3", - "5_6_4", - "5_6_5", - "5_6_6", - "5_6_7", - "5_6_8", - "5_6_9", - "5_6_10", - "5_6_11", - "5_6_12", - "5_6_13", - "6_0_0_alpha1", - "6_0_0_alpha2", - "6_0_0_beta1", - "6_0_0_beta2", - "6_0_0_rc1", - "6_0_0_rc2", - "6_0_0", - "6_0_1", - "6_1_0", - "6_1_1", - "6_1_2", - "6_1_3", - "6_1_4", - "6_2_0", - "6_2_1", - "6_2_2", - "6_2_3", - "6_2_4", - "6_3_0", - "6_3_1", - "6_3_2", - "6_4_0", - "6_4_1", - "6_4_2", - "6_5_0", - "6_6_0" - ) - ); - sampleVersions.put( - "6.4.2", - asList( - "5_0_0_alpha1", - "5_0_0_alpha2", - "5_0_0_alpha3", - "5_0_0_alpha4", - "5_0_0_alpha5", - "5_0_0_beta1", - "5_0_0_rc1", - "5_0_0", - "5_0_1", - "5_0_2", - "5_1_1", - "5_1_2", - "5_2_0", - "5_2_1", - "5_2_2", - "5_3_0", - "5_3_1", - "5_3_2", - "5_3_3", - "5_4_0", - "5_4_1", - "5_4_2", - "5_4_3", - "5_5_0", - "5_5_1", - "5_5_2", - "5_5_3", - "5_6_0", - "5_6_1", - "5_6_2", - 
"5_6_3", - "5_6_4", - "5_6_5", - "5_6_6", - "5_6_7", - "5_6_8", - "5_6_9", - "5_6_10", - "5_6_11", - "5_6_12", - "5_6_13", - "6_0_0_alpha1", - "6_0_0_alpha2", - "6_0_0_beta1", - "6_0_0_beta2", - "6_0_0_rc1", - "6_0_0_rc2", - "6_0_0", - "6_0_1", - "6_1_0", - "6_1_1", - "6_1_2", - "6_1_3", - "6_1_4", - "6_2_0", - "6_2_1", - "6_2_2", - "6_2_3", - "6_2_4", - "6_3_0", - "6_3_1", - "6_3_2", - "6_4_0", - "6_4_1", - "6_4_2" - ) - ); - sampleVersions.put("7.1.0", asList("7_1_0", "7_0_0", "6_7_0", "6_6_1", "6_6_0")); - } - - @BeforeClass - public static void setupAll() { - Assume.assumeFalse(Architecture.current() == Architecture.AARCH64); - } - - @Test(expected = IllegalArgumentException.class) - public void testExceptionOnEmpty() { - new BwcVersions(asList("foo", "bar"), Version.fromString("7.0.0")); - } - - @Test(expected = IllegalStateException.class) - public void testExceptionOnNonCurrent() { - new BwcVersions(singletonList(formatVersionToLine("6.5.0")), Version.fromString("7.0.0")); - } - - @Test(expected = IllegalStateException.class) - public void testExceptionOnTooManyMajors() { - new BwcVersions( - asList(formatVersionToLine("5.6.12"), formatVersionToLine("6.5.0"), formatVersionToLine("7.0.0")), - Version.fromString("6.5.0") - ); - } - - public void testWireCompatible() { - assertVersionsEquals(asList("6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getWireCompatible()); - assertVersionsEquals( - asList( - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2", - "6.5.0" - ), - getVersionCollection("6.5.0").getWireCompatible() - ); - - assertVersionsEquals( - asList( - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2" - ), - getVersionCollection("6.4.2").getWireCompatible() - ); - - assertVersionsEquals( - asList( - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2", - "6.5.0", - "6.6.0" - ), - getVersionCollection("6.6.0").getWireCompatible() - ); - - assertVersionsEquals(asList("7.3.0", "8.0.0"), getVersionCollection("8.0.0").getWireCompatible()); - assertVersionsEquals(asList("6.7.0", "7.0.0", "7.1.0"), getVersionCollection("7.1.0").getWireCompatible()); - - } - - public void testWireCompatibleUnreleased() { - assertVersionsEquals(asList("6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getUnreleasedWireCompatible()); - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleasedWireCompatible()); - - assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleasedWireCompatible()); - - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleasedWireCompatible()); - - 
assertVersionsEquals(asList("7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleasedWireCompatible()); - assertVersionsEquals(asList("6.7.0", "7.0.0", "7.1.0"), getVersionCollection("7.1.0").getWireCompatible()); - } - - public void testIndexCompatible() { - assertVersionsEquals( - asList( - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2", - "6.5.0", - "7.0.0" - ), - getVersionCollection("7.0.0-alpha1").getIndexCompatible() - ); - - assertVersionsEquals( - asList( - "5.0.0", - "5.0.1", - "5.0.2", - "5.1.1", - "5.1.2", - "5.2.0", - "5.2.1", - "5.2.2", - "5.3.0", - "5.3.1", - "5.3.2", - "5.3.3", - "5.4.0", - "5.4.1", - "5.4.2", - "5.4.3", - "5.5.0", - "5.5.1", - "5.5.2", - "5.5.3", - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2", - "6.5.0" - ), - getVersionCollection("6.5.0").getIndexCompatible() - ); - - assertVersionsEquals( - asList( - "5.0.0", - "5.0.1", - "5.0.2", - "5.1.1", - "5.1.2", - "5.2.0", - "5.2.1", - "5.2.2", - "5.3.0", - "5.3.1", - "5.3.2", - "5.3.3", - "5.4.0", - "5.4.1", - "5.4.2", - "5.4.3", - "5.5.0", - "5.5.1", - "5.5.2", - "5.5.3", - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2" - ), - getVersionCollection("6.4.2").getIndexCompatible() - ); - - assertVersionsEquals( - asList( - "5.0.0", - "5.0.1", - "5.0.2", - "5.1.1", - "5.1.2", - "5.2.0", - "5.2.1", - "5.2.2", - "5.3.0", - "5.3.1", - "5.3.2", - "5.3.3", - "5.4.0", - "5.4.1", - "5.4.2", - "5.4.3", - "5.5.0", - "5.5.1", - "5.5.2", - "5.5.3", - "5.6.0", - "5.6.1", - "5.6.2", - "5.6.3", - "5.6.4", - "5.6.5", - "5.6.6", - "5.6.7", - "5.6.8", - "5.6.9", - "5.6.10", - "5.6.11", - "5.6.12", - "5.6.13", - "6.0.0", - "6.0.1", - "6.1.0", - "6.1.1", - "6.1.2", - "6.1.3", - "6.1.4", - "6.2.0", - "6.2.1", - "6.2.2", - "6.2.3", - "6.2.4", - "6.3.0", - "6.3.1", - "6.3.2", - "6.4.0", - "6.4.1", - "6.4.2", - "6.5.0", - "6.6.0" - ), - getVersionCollection("6.6.0").getIndexCompatible() - ); - - assertVersionsEquals( - asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"), - getVersionCollection("8.0.0").getIndexCompatible() - ); - } - - public void testIndexCompatibleUnreleased() { - assertVersionsEquals(asList("6.4.2", "6.5.0", "7.0.0"), getVersionCollection("7.0.0-alpha1").getUnreleasedIndexCompatible()); - - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleasedIndexCompatible()); - - assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleasedIndexCompatible()); - - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleasedIndexCompatible()); - - assertVersionsEquals(asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleasedIndexCompatible()); - } - - public void testGetUnreleased() { - 
assertVersionsEquals(asList("6.4.2", "6.5.0", "7.0.0-alpha1"), getVersionCollection("7.0.0-alpha1").getUnreleased()); - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0"), getVersionCollection("6.5.0").getUnreleased()); - assertVersionsEquals(asList("5.6.13", "6.4.2"), getVersionCollection("6.4.2").getUnreleased()); - assertVersionsEquals(asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), getVersionCollection("6.6.0").getUnreleased()); - assertVersionsEquals(asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), getVersionCollection("8.0.0").getUnreleased()); - } - - public void testGetBranch() { - assertUnreleasedBranchNames(asList("6.4", "6.x"), getVersionCollection("7.0.0-alpha1")); - assertUnreleasedBranchNames(asList("5.6", "6.4"), getVersionCollection("6.5.0")); - assertUnreleasedBranchNames(singletonList("5.6"), getVersionCollection("6.4.2")); - assertUnreleasedBranchNames(asList("5.6", "6.4", "6.5"), getVersionCollection("6.6.0")); - assertUnreleasedBranchNames(asList("7.1", "7.2", "7.x"), getVersionCollection("8.0.0")); - } - - public void testGetGradleProjectPath() { - assertUnreleasedGradleProjectPaths( - asList(":distribution:bwc:bugfix", ":distribution:bwc:minor"), - getVersionCollection("7.0.0-alpha1") - ); - assertUnreleasedGradleProjectPaths( - asList(":distribution:bwc:maintenance", ":distribution:bwc:bugfix"), - getVersionCollection("6.5.0") - ); - assertUnreleasedGradleProjectPaths(singletonList(":distribution:bwc:maintenance"), getVersionCollection("6.4.2")); - assertUnreleasedGradleProjectPaths( - asList(":distribution:bwc:maintenance", ":distribution:bwc:bugfix", ":distribution:bwc:minor"), - getVersionCollection("6.6.0") - ); - assertUnreleasedGradleProjectPaths( - asList(":distribution:bwc:bugfix", ":distribution:bwc:staged", ":distribution:bwc:minor"), - getVersionCollection("8.0.0") - ); - assertUnreleasedGradleProjectPaths( - asList(":distribution:bwc:maintenance", ":distribution:bwc:staged", ":distribution:bwc:minor"), - getVersionCollection("7.1.0") - ); - } - - public void testCompareToAuthoritative() { - List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); - List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0") - .map(Version::fromString) - .collect(Collectors.toList()); - - BwcVersions vc = new BwcVersions( - listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), - Version.fromString("8.0.0") - ); - vc.compareToAuthoritative(authoritativeReleasedVersions); - } - - public void testCompareToAuthoritativeUnreleasedActuallyReleased() { - List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); - List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0", "7.1.1", "8.0.0") - .map(Version::fromString) - .collect(Collectors.toList()); - - BwcVersions vc = new BwcVersions( - listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), - Version.fromString("8.0.0") - ); - expectedEx.expect(IllegalStateException.class); - expectedEx.expectMessage("but they are released"); - vc.compareToAuthoritative(authoritativeReleasedVersions); - } - - public void testCompareToAuthoritativeNotReallyRelesed() { - List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); - List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1").map(Version::fromString).collect(Collectors.toList()); - BwcVersions vc = new BwcVersions( - 
listOfVersions.stream().map(this::formatVersionToLine).collect(Collectors.toList()), - Version.fromString("8.0.0") - ); - expectedEx.expect(IllegalStateException.class); - expectedEx.expectMessage("not really released"); - vc.compareToAuthoritative(authoritativeReleasedVersions); - } - - private void assertUnreleasedGradleProjectPaths(List expectedNAmes, BwcVersions bwcVersions) { - List actualNames = new ArrayList<>(); - bwcVersions.forPreviousUnreleased(unreleasedVersion -> actualNames.add(unreleasedVersion.gradleProjectPath)); - assertEquals(expectedNAmes, actualNames); - } - - private void assertUnreleasedBranchNames(List expectedBranches, BwcVersions bwcVersions) { - List actualBranches = new ArrayList<>(); - bwcVersions.forPreviousUnreleased(unreleasedVersionInfo -> actualBranches.add(unreleasedVersionInfo.branch)); - assertEquals(expectedBranches, actualBranches); - } - - private String formatVersionToLine(final String version) { - return " public static final Version V_" + version.replaceAll("\\.", "_") + " "; - } - - private void assertVersionsEquals(List expected, List actual) { - assertEquals(expected.stream().map(Version::fromString).collect(Collectors.toList()), actual); - } - - private BwcVersions getVersionCollection(String currentVersion) { - return new BwcVersions( - sampleVersions.get(currentVersion).stream().map(this::formatVersionToLine).collect(Collectors.toList()), - Version.fromString(currentVersion) - ); - } -} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java index 12acc036782c6..4ff64ca31c1c0 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/InternalDistributionDownloadPluginTests.java @@ -38,10 +38,10 @@ public void testLocalBwcPackages() { for (ElasticsearchDistributionType packageType : types) { // note: no non bundled jdk for bwc String configName = projectName(packageType.toString(), true); - checkBwc("minor", configName, BWC_MINOR_VERSION, packageType, null, BWC_MINOR); - checkBwc("staged", configName, BWC_STAGED_VERSION, packageType, null, BWC_STAGED); - checkBwc("bugfix", configName, BWC_BUGFIX_VERSION, packageType, null, BWC_BUGFIX); - checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION, packageType, null, BWC_MAINTENANCE); + checkBwc("minor", configName, BWC_MINOR_VERSION.elasticsearch, packageType, null, BWC_MINOR); + checkBwc("staged", configName, BWC_STAGED_VERSION.elasticsearch, packageType, null, BWC_STAGED); + checkBwc("bugfix", configName, BWC_BUGFIX_VERSION.elasticsearch, packageType, null, BWC_BUGFIX); + checkBwc("maintenance", configName, BWC_MAINTENANCE_VERSION.elasticsearch, packageType, null, BWC_MAINTENANCE); } } } diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index 544cc47d4a9c0..8db18b63c36ba 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.0.0 -lucene = 9.0.0-snapshot-cfd9f9f98f7 +lucene = 9.0.0-snapshot-2719cf6630e bundled_jdk_vendor = adoptium bundled_jdk = 17+35 @@ -44,7 +44,6 @@ httpasyncclient = 4.1.4 commonslogging = 1.1.3 commonscodec = 1.14 hamcrest = 2.1 -securemock = 1.2 mocksocket = 1.2 # benchmark dependencies diff --git 
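Note how the new spec's `addVersion` helper (further up) emits lines shaped like `public static final Version V_7_16_1 = new Version(0000000, org.apache.lucene.util.Version.LUCENE_8_10_0);`, so whatever parses `versionLines` must now recover both the Elasticsearch and the Lucene version from a single line. A hypothetical pattern that would do it (the plugin's actual regex is not shown in this diff):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class VersionLineParser {
    // Illustrative only: captures the ES version from the V_x_y_z constant name
    // and the Lucene version from the LUCENE_x_y_z constant on the same line.
    private static final Pattern LINE = Pattern.compile("V_(\\d+)_(\\d+)_(\\d+)\\s*=.*LUCENE_(\\d+)_(\\d+)_(\\d+)");

    public static void main(String[] args) {
        String line = "    public static final Version V_7_16_1 = "
            + "new Version(0000000, org.apache.lucene.util.Version.LUCENE_8_10_0);";
        Matcher m = LINE.matcher(line);
        if (m.find()) {
            String es = m.group(1) + "." + m.group(2) + "." + m.group(3);
            String lucene = m.group(4) + "." + m.group(5) + "." + m.group(6);
            System.out.println(es + " -> Lucene " + lucene); // prints: 7.16.1 -> Lucene 8.10.0
        }
    }
}
```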
a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy index 0bd8768ccc6c6..8ed7f8cd4fba2 100644 --- a/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy +++ b/build-tools/src/testFixtures/groovy/org/elasticsearch/gradle/fixtures/AbstractGradleFuncTest.groovy @@ -35,10 +35,11 @@ abstract class AbstractGradleFuncTest extends Specification { settingsFile << "rootProject.name = 'hello-world'\n" buildFile = testProjectDir.newFile('build.gradle') propertiesFile = testProjectDir.newFile('gradle.properties') - propertiesFile << "org.gradle.java.installations.fromEnv=JAVA_HOME,RUNTIME_JAVA_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME" + propertiesFile << + "org.gradle.java.installations.fromEnv=JAVA_HOME,RUNTIME_JAVA_HOME,JAVA15_HOME,JAVA14_HOME,JAVA13_HOME,JAVA12_HOME,JAVA11_HOME,JAVA8_HOME" } - File addSubProject(String subProjectPath){ + File addSubProject(String subProjectPath) { def subProjectBuild = file(subProjectPath.replace(":", "/") + "/build.gradle") settingsFile << "include \"${subProjectPath}\"\n" subProjectBuild @@ -50,13 +51,14 @@ abstract class AbstractGradleFuncTest extends Specification { GradleRunner gradleRunner(File projectDir, String... arguments) { return new NormalizeOutputGradleRunner( - new InternalAwareGradleRunner(GradleRunner.create() + new InternalAwareGradleRunner( + GradleRunner.create() .withDebug(ManagementFactory.getRuntimeMXBean().getInputArguments().toString().indexOf("-agentlib:jdwp") > 0) .withProjectDir(projectDir) .withPluginClasspath() .forwardOutput() - ), - projectDir + ), + projectDir ).withArguments(arguments) } @@ -96,23 +98,34 @@ abstract class AbstractGradleFuncTest extends Specification { return jarFile; } - File internalBuild(File buildScript = buildFile, String bugfix = "7.10.1", String staged = "7.11.0", String minor = "7.12.0") { + File internalBuild( + File buildScript = buildFile, + String bugfix = "7.15.2", + String bugfixLucene = "8.9.0", + String staged = "7.16.0", + String stagedLucene = "8.10.0", + String minor = "8.0.0", + String minorLucene = "9.0.0" + ) { buildScript << """plugins { id 'elasticsearch.global-build-info' } import org.elasticsearch.gradle.Architecture import org.elasticsearch.gradle.internal.info.BuildParams + import org.elasticsearch.gradle.internal.BwcVersions.VersionPair import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.Version - Version currentVersion = Version.fromString("8.0.0") - def versionList = [] - versionList.addAll( - Arrays.asList(Version.fromString("$bugfix"), Version.fromString("$staged"), Version.fromString("$minor"), currentVersion) - ) + Version currentVersion = Version.fromString("8.1.0") + def versionList = [ + new VersionPair(Version.fromString("$bugfix"), Version.fromString("$bugfixLucene")), + new VersionPair(Version.fromString("$staged"), Version.fromString("$stagedLucene")), + new VersionPair(Version.fromString("$minor"), Version.fromString("$minorLucene")), + new VersionPair(currentVersion, Version.fromString("9.0.0")) + ] - BwcVersions versions = new BwcVersions(new TreeSet<>(versionList), currentVersion) + BwcVersions versions = new BwcVersions(currentVersion, versionList) BuildParams.init { it.setBwcVersions(provider(() -> versions)) } """ } @@ -128,9 +141,9 @@ abstract class AbstractGradleFuncTest extends Specification { void 
execute(String command, File workingDir = testProjectDir.root) { def proc = command.execute(Collections.emptyList(), workingDir) proc.waitFor() - if(proc.exitValue()) { + if (proc.exitValue()) { System.err.println("Error running command ${command}:") System.err.println("Syserr: " + proc.errorStream.text) } } -} \ No newline at end of file +} diff --git a/build.gradle b/build.gradle index 1569cec42e9a5..79182b35a740f 100644 --- a/build.gradle +++ b/build.gradle @@ -116,12 +116,6 @@ tasks.register("verifyVersions") { throw new GradleException("No branch choice exists for development branch ${unreleasedVersion.branch} in .backportrc.json.") } } - BwcVersions.UnreleasedVersionInfo nextMinor = unreleased.find { it.branch.endsWith("x") } - String versionMapping = backportConfig.get("branchLabelMapping").fields().find { it.value.textValue() == nextMinor.branch }.key - if (versionMapping != "^v${nextMinor.version}\$") { - throw new GradleException("Backport label mapping for branch ${nextMinor.branch} is '${versionMapping}' but should be " + - "'^v${nextMinor.version}\$'. Update .backportrc.json.") - } } } diff --git a/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/documentation/EnrollmentDocumentationIT.java b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/documentation/EnrollmentDocumentationIT.java index cbdf9ccfac712..deb571147bfd6 100644 --- a/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/documentation/EnrollmentDocumentationIT.java +++ b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/documentation/EnrollmentDocumentationIT.java @@ -28,6 +28,7 @@ import static org.hamcrest.Matchers.startsWith; +@SuppressWarnings("removal") public class EnrollmentDocumentationIT extends ESRestHighLevelClientTestCase { static Path HTTP_TRUSTSTORE; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/AsyncSearchClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/AsyncSearchClient.java index d01c81ab2a93f..30dbfd022e205 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/AsyncSearchClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/AsyncSearchClient.java @@ -19,6 +19,13 @@ import static java.util.Collections.emptySet; +/** + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client + */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class AsyncSearchClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java index 5beb97b4dfc16..70975ccc42e74 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -39,7 +39,13 @@ *
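From this point on, the diff applies the same pair of annotations to every High Level Rest Client entry point. `@Deprecated(since = "7.16.0", forRemoval = true)` marks the API as terminally deprecated; `@SuppressWarnings("removal")` is then needed on the client classes and their tests because javac raises the stronger "removal" warning, fatal under `-Werror`, whenever code references a type marked for removal. A self-contained illustration with made-up names (not HLRC code):

```java
// Stand-in for the deprecated HLRC classes.
@Deprecated(since = "7.16.0", forRemoval = true)
class LegacyClient {
    void search() {}
}

class Caller {
    // Without this annotation javac reports:
    //   warning: [removal] LegacyClient has been deprecated and marked for removal
    @SuppressWarnings("removal")
    void run() {
        new LegacyClient().search();
    }
}
```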
<p>
* See the * X-Pack Rollup APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class CcrClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index 550ea1199c74c..114547dfe30c2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -34,7 +34,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Cluster API. *
<p>
* See Cluster API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class ClusterClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/EnrichClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/EnrichClient.java index d102c5cb1811d..3744eaeb93ad8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/EnrichClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/EnrichClient.java @@ -27,7 +27,13 @@ *
<p>
* See the * X-Pack Enrich Policy APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class EnrichClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/EqlClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/EqlClient.java index cf59c7f6a8358..6b1a44ca6ea1e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/EqlClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/EqlClient.java @@ -23,7 +23,13 @@ *
<p>
* See the * EQL APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class EqlClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/FeaturesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/FeaturesClient.java index a26e1dcc8843d..11c92276ce6e5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/FeaturesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/FeaturesClient.java @@ -22,7 +22,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Snapshot API. *
<p>
* See Snapshot API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class FeaturesClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java index 996ab0d3923f7..f0652eeecc852 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -16,7 +16,13 @@ import static java.util.Collections.emptySet; - +/** + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client + */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class GraphClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index b495ff4fafdfd..3fcc3740a63ba 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -40,6 +40,13 @@ import static java.util.Collections.emptySet; +/** + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client + */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class IndexLifecycleClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index a1168e37b021c..f56231f5a263a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -81,7 +81,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Indices API. *
<p>
* See Indices API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class IndicesClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 9dbf3f7f8f072..8f4ccfcf306f5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -27,7 +27,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Ingest API. *
<p>
* See Ingest API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class IngestClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java index 4727990951576..5add90115f19b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/LicenseClient.java @@ -45,7 +45,13 @@ *
<p>
* See the * X-Pack Licensing APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class LicenseClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 4f34dd3169bb4..4daa42349a0f2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -123,7 +123,13 @@ *
<p>
* See the * X-Pack Machine Learning APIs for additional information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class MachineLearningClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java index d1c53fc3ad96b..a48ec45ad8854 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MigrationClient.java @@ -25,7 +25,13 @@ *
<p>
* See the * X-Pack Migration APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class MigrationClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 52e1412fa8b4b..6f9cbc62011c7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -250,7 +250,12 @@ * {@link ResponseException} * * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class RestHighLevelClient implements Closeable { private static final Logger logger = LogManager.getLogger(RestHighLevelClient.class); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClientBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClientBuilder.java index 6357cc2851c32..7109329da8198 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClientBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClientBuilder.java @@ -18,8 +18,12 @@ /** * Helper to build a {@link RestHighLevelClient}, allowing setting the low-level client that * should be used as well as whether API compatibility should be used. + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ - +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class RestHighLevelClientBuilder { private final RestClient restClient; private CheckedConsumer closeHandler = RestClient::close; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index d47b3edd0723a..397ffcf8116c0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -34,7 +34,13 @@ *
<p>
* See the * X-Pack Rollup APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class RollupClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SearchableSnapshotsClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SearchableSnapshotsClient.java index fecbea6ee5a8f..4bb3f2367fdba 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SearchableSnapshotsClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SearchableSnapshotsClient.java @@ -23,7 +23,13 @@ * * See the Searchable Snapshots * APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public class SearchableSnapshotsClient { private RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index bcd91485417cd..86f2a04d7fc4f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -92,7 +92,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Security APIs. *
<p>
* See Security APIs on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class SecurityClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index 730399481f72b..28406b9bb119a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -37,7 +37,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Snapshot API. *
<p>
* See Snapshot API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class SnapshotClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index 34745bb8fbf29..0d3cc51998f9e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -25,7 +25,13 @@ * A wrapper for the {@link RestHighLevelClient} that provides methods for accessing the Tasks API. *
<p>
* See Task Management API on elastic.co + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class TasksClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TextStructureClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TextStructureClient.java index 83dc972e9364a..5549ac16666c7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TextStructureClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TextStructureClient.java @@ -7,20 +7,26 @@ */ package org.elasticsearch.client; -import java.io.IOException; -import java.util.Collections; - import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.textstructure.FindStructureRequest; import org.elasticsearch.client.textstructure.FindStructureResponse; +import java.io.IOException; +import java.util.Collections; + /** * Text Structure API client wrapper for the {@link RestHighLevelClient} *
<p>
* See the * X-Pack Text Structure APIs for additional information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class TextStructureClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformClient.java index 498d9e4b6d24c..74be3b94054ac 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TransformClient.java @@ -28,6 +28,13 @@ import java.io.IOException; import java.util.Collections; +/** + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client + */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class TransformClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java index 72b2ae07eef8a..9f535e76ec9f5 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/WatcherClient.java @@ -33,6 +33,13 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +/** + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client + */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class WatcherClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index a5eeb23827db1..9767e1cba944f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -26,7 +26,13 @@ *
<p>
* See the * REST APIs on elastic.co for more information. + * + * @deprecated The High Level Rest Client is deprecated in favor of the + * + * Elasticsearch Java API Client */ +@Deprecated(since = "7.16.0", forRemoval = true) +@SuppressWarnings("removal") public final class XPackClient { private final RestHighLevelClient restHighLevelClient; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponse.java index e75d2c7ce2858..545837d7e5b87 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponse.java @@ -25,7 +25,7 @@ public class GetFeatureUpgradeStatusResponse { private static final ParseField FEATURE_UPGRADE_STATUSES = new ParseField("features"); - private static final ParseField UPGRADE_STATUS = new ParseField("upgrade_status"); + private static final ParseField UPGRADE_STATUS = new ParseField("migration_status"); private final List featureUpgradeStatuses; private final String upgradeStatus; @@ -76,7 +76,7 @@ public static class FeatureUpgradeStatus { private static final ParseField FEATURE_NAME = new ParseField("feature_name"); private static final ParseField MINIMUM_INDEX_VERSION = new ParseField("minimum_index_version"); - private static final ParseField UPGRADE_STATUS = new ParseField("upgrade_status"); + private static final ParseField UPGRADE_STATUS = new ParseField("migration_status"); private static final ParseField INDEX_VERSIONS = new ParseField("indices"); @SuppressWarnings("unchecked") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index 30224e0151995..62bc270fedfdc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -60,6 +60,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +@SuppressWarnings("removal") public class CCRIT extends ESRestHighLevelClientTestCase { @Before diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 11e0c8f24f755..480278db72917 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -37,13 +37,13 @@ import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.SniffConnectionStrategy; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.HashMap; @@ -316,7 +316,7 @@ public void testClusterHealthNotFoundIndex() throws IOException { 
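The `upgrade_status` to `migration_status` rename above is a hard cut: a response still using the old field name would no longer parse. Had a transition period been wanted, `ParseField` supports deprecated alternative names; a hedged sketch of that alternative (not what this diff does, and the `org.elasticsearch.xcontent` import path is an assumption for this branch):

```java
import org.elasticsearch.xcontent.ParseField;

class StatusFields {
    // The varargs after the primary name register deprecated aliases, so a parser
    // would accept the new "migration_status" while still tolerating "upgrade_status".
    static final ParseField UPGRADE_STATUS = new ParseField("migration_status", "upgrade_status");
}
```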
assertThat(response.getStatus(), equalTo(ClusterHealthStatus.RED)); assertNoIndices(response); assertWarnings("The HTTP status code for a cluster health timeout will be changed from 408 to 200 in a " + - "future version. Set the [es.cluster_health.request_timeout_200] system property to [true] to suppress this message and " + + "future version. Set the [return_200_for_cluster_health_timeout] query parameter to [true] to suppress this message and " + "opt in to the future behaviour now."); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index d65a2c6770695..843e4475c8370 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -53,6 +53,7 @@ /** * Test and demonstrates how {@link RestHighLevelClient} can be extended to support custom endpoints. */ +@SuppressWarnings("removal") public class CustomRestHighLevelClientTests extends ESTestCase { private static final String ENDPOINT = "/_custom"; @@ -68,13 +69,13 @@ public void initClients() throws IOException { doAnswer(inv -> mockPerformRequest((Request) inv.getArguments()[0])) .when(restClient) - .performRequest(argThat(new RequestMatcher("GET", ENDPOINT))); + .performRequest(argThat(new RequestMatcher("GET", ENDPOINT)::matches)); doAnswer(inv -> mockPerformRequestAsync( ((Request) inv.getArguments()[0]), (ResponseListener) inv.getArguments()[1])) .when(restClient) - .performRequestAsync(argThat(new RequestMatcher("GET", ENDPOINT)), any(ResponseListener.class)); + .performRequestAsync(argThat(new RequestMatcher("GET", ENDPOINT)::matches), any(ResponseListener.class)); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 7000e91b049ea..bd779d2b1171b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -62,6 +62,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("removal") public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { public static final String IGNORE_THROTTLED_DEPRECATION_WARNING = "[ignore_throttled] parameter is deprecated because frozen " + diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/EnrichIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/EnrichIT.java index a97ce12962294..ca89be39c3c05 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/EnrichIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/EnrichIT.java @@ -26,6 +26,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("removal") public class EnrichIT extends ESRestHighLevelClientTestCase { public void testCRUD() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/EqlIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/EqlIT.java index c7661ac28b5ef..64c92c06b29ff 100644 --- 
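The `::matches` suffix added to the two `argThat` calls above adapts the test's Hamcrest-style `RequestMatcher` to Mockito's `ArgumentMatcher` functional interface, whose single abstract method is `boolean matches(T)`; newer Mockito versions dropped the built-in Hamcrest bridge. A minimal sketch of why the method reference compiles, assuming `RequestMatcher` exposes a Hamcrest-shaped `boolean matches(Object)` method:

```java
import org.elasticsearch.client.Request; // low-level REST client request
import org.mockito.ArgumentMatcher;

class RequestMatcher {
    private final String method;
    private final String endpoint;

    RequestMatcher(String method, String endpoint) {
        this.method = method;
        this.endpoint = endpoint;
    }

    // Hamcrest-shaped signature: takes Object, returns boolean.
    boolean matches(Object actual) {
        return actual instanceof Request r && method.equals(r.getMethod()) && endpoint.equals(r.getEndpoint());
    }
}

class Wiring {
    // A method reference with a wider parameter type still satisfies the SAM,
    // so matches(Object) can implement ArgumentMatcher<Request>.matches(Request).
    ArgumentMatcher<Request> matcher = new RequestMatcher("GET", "/_custom")::matches;
}
```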
a/client/rest-high-level/src/test/java/org/elasticsearch/client/EqlIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/EqlIT.java @@ -37,6 +37,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@SuppressWarnings("removal") public class EqlIT extends ESRestHighLevelClientTestCase { private static final String INDEX_NAME = "index"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/FeaturesIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/FeaturesIT.java index 9af22ebcafe20..c9527bc123956 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/FeaturesIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/FeaturesIT.java @@ -21,6 +21,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("removal") public class FeaturesIT extends ESRestHighLevelClientTestCase { public void testGetFeatures() throws IOException { GetFeaturesRequest request = new GetFeaturesRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 404e49fdb4d95..8e7783db95ad1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -42,7 +42,6 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.core.ShardsAcknowledgedResponse; import org.elasticsearch.client.indices.AnalyzeRequest; import org.elasticsearch.client.indices.AnalyzeResponse; import org.elasticsearch.client.indices.CloseIndexRequest; @@ -58,7 +57,6 @@ import org.elasticsearch.client.indices.DeleteAliasRequest; import org.elasticsearch.client.indices.DeleteComposableIndexTemplateRequest; import org.elasticsearch.client.indices.DeleteDataStreamRequest; -import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetComposableIndexTemplateRequest; import org.elasticsearch.client.indices.GetComposableIndexTemplatesResponse; import org.elasticsearch.client.indices.GetDataStreamRequest; @@ -80,7 +78,6 @@ import org.elasticsearch.client.indices.ReloadAnalyzersResponse; import org.elasticsearch.client.indices.SimulateIndexTemplateRequest; import org.elasticsearch.client.indices.SimulateIndexTemplateResponse; -import org.elasticsearch.client.indices.UnfreezeIndexRequest; import org.elasticsearch.client.indices.rollover.RolloverRequest; import org.elasticsearch.client.indices.rollover.RolloverResponse; import org.elasticsearch.cluster.metadata.AliasMetadata; @@ -95,16 +92,16 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; 
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xcontent.json.JsonXContent;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -134,6 +131,7 @@ import static org.hamcrest.Matchers.startsWith;
 import static org.hamcrest.core.IsInstanceOf.instanceOf;
 
+@SuppressWarnings("removal")
 public class IndicesClientIT extends ESRestHighLevelClientTestCase {
 
     public static final RequestOptions LEGACY_TEMPLATE_OPTIONS = RequestOptions.DEFAULT.toBuilder()
@@ -1534,24 +1532,6 @@ public void testAnalyze() throws Exception {
         assertNotNull(detailsResponse.detail());
     }
 
-    public void testFreezeAndUnfreeze() throws IOException {
-        createIndex("test", Settings.EMPTY);
-        RestHighLevelClient client = highLevelClient();
-
-        final RequestOptions freezeIndexOptions = RequestOptions.DEFAULT.toBuilder()
-            .setWarningsHandler(warnings -> List.of(FROZEN_INDICES_DEPRECATION_WARNING).equals(warnings) == false).build();
-
-        ShardsAcknowledgedResponse freeze = execute(new FreezeIndexRequest("test"), client.indices()::freeze,
-            client.indices()::freezeAsync, freezeIndexOptions);
-        assertTrue(freeze.isShardsAcknowledged());
-        assertTrue(freeze.isAcknowledged());
-
-        ShardsAcknowledgedResponse unfreeze = execute(new UnfreezeIndexRequest("test"), client.indices()::unfreeze,
-            client.indices()::unfreezeAsync, freezeIndexOptions);
-        assertTrue(unfreeze.isShardsAcknowledged());
-        assertTrue(unfreeze.isAcknowledged());
-    }
-
     public void testReloadAnalyzer() throws IOException {
         createIndex("test", Settings.EMPTY);
         RestHighLevelClient client = highLevelClient();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/LicenseIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/LicenseIT.java
index d0423f893e8aa..a950abcb99bf4 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/LicenseIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/LicenseIT.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.client;
 
 import org.elasticsearch.Build;
+import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.license.DeleteLicenseRequest;
 import org.elasticsearch.client.license.GetBasicStatusResponse;
@@ -23,6 +24,7 @@ import org.elasticsearch.client.license.StartTrialRequest;
 import org.elasticsearch.client.license.StartTrialResponse;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.json.JsonXContent;
 import org.junit.After;
@@ -40,6 +42,7 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.emptyOrNullString;
 import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.stringContainsInOrder;
 
 public class LicenseIT extends ESRestHighLevelClientTestCase {
 
@@ -91,6 +94,40 @@ public void testStartTrial() throws Exception {
         }
     }
 
+    public void testPutInvalidTrialLicense() throws Exception {
+        assumeTrue("Trial license is only valid when tested against snapshot/test builds",
+            Build.CURRENT.isSnapshot());
+
+        // use a hard-coded trial license for 20 yrs so that we can roll back from other licenses
+        final String signature = "xx"; // Truncated, so it is expected to fail validation
+        final String licenseDefinition = Strings.toString(jsonBuilder()
+            .startObject()
+            .field("licenses", List.of(
+                Map.of(
+                    "uid", "96fc37c6-6fc9-43e2-a40d-73143850cd72",
+                    "type", "trial",
+                    // 2018-10-16 07:02:48 UTC
+                    "issue_date_in_millis", "1539673368158",
+                    // 2038-10-11 07:02:48 UTC, 20 yrs later
+                    "expiry_date_in_millis", "2170393368158",
+                    "max_nodes", "5",
+                    "issued_to", "client_rest-high-level_integTestCluster",
+                    "issuer", "elasticsearch",
+                    "start_date_in_millis", "-1",
+                    "signature", signature)))
+            .endObject());
+
+        final PutLicenseRequest request = new PutLicenseRequest();
+        request.setAcknowledge(true);
+        request.setLicenseDefinition(licenseDefinition);
+        ElasticsearchStatusException e = expectThrows(
+            ElasticsearchStatusException.class,
+            () -> highLevelClient().license().putLicense(request, RequestOptions.DEFAULT)
+        );
+        assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
+        assertThat(e.getMessage(), stringContainsInOrder("malformed signature for license"));
+    }
+
     public static void putTrialLicense() throws IOException {
         assumeTrue("Trial license is only valid when tested against snapshot/test builds",
             Build.CURRENT.isSnapshot());
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java
index 9363a4cf97fe6..7e4ff761209e0 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningGetResultsIT.java
@@ -55,6 +55,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.hamcrest.Matchers.nullValue;
 
+@SuppressWarnings("removal")
 public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
 
     private static final String RESULTS_INDEX = ".ml-anomalies-shared";
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
index d5c286b38709e..d25cfa94759f7 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java
@@ -218,6 +218,7 @@ import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
+@SuppressWarnings("removal")
 public class MachineLearningIT extends ESRestHighLevelClientTestCase {
 
     private static final RequestOptions POST_DATA_OPTIONS = RequestOptions.DEFAULT.toBuilder()
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java
index 06ee1c97039f6..67d4818d26fc0 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java
@@ -23,8 +23,8 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.nullValue;
 
 public class MigrationIT extends ESRestHighLevelClientTestCase {
 
@@ -42,7 +42,7 @@ public void testGetDeprecationInfo() throws IOException {
     public void testGetFeatureUpgradeStatus() throws IOException {
         GetFeatureUpgradeStatusRequest request = new GetFeatureUpgradeStatusRequest();
         GetFeatureUpgradeStatusResponse response = highLevelClient().migration().getFeatureUpgradeStatus(request, RequestOptions.DEFAULT);
-        assertThat(response.getUpgradeStatus(), equalTo("NO_UPGRADE_NEEDED"));
+        assertThat(response.getUpgradeStatus(), equalTo("NO_MIGRATION_NEEDED"));
         assertThat(response.getFeatureUpgradeStatuses().size(), greaterThanOrEqualTo(1));
         Optional<GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus> optionalTasksStatus = response.getFeatureUpgradeStatuses().stream()
             .filter(status -> "tasks".equals(status.getFeatureName()))
@@ -52,7 +52,7 @@ public void testGetFeatureUpgradeStatus() throws IOException {
 
         GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus tasksStatus = optionalTasksStatus.get();
 
-        assertThat(tasksStatus.getUpgradeStatus(), equalTo("NO_UPGRADE_NEEDED"));
+        assertThat(tasksStatus.getUpgradeStatus(), equalTo("NO_MIGRATION_NEEDED"));
         assertThat(tasksStatus.getMinimumIndexVersion(), equalTo(Version.CURRENT.toString()));
         assertThat(tasksStatus.getFeatureName(), equalTo("tasks"));
     }
@@ -60,11 +60,8 @@ public void testPostFeatureUpgradeStatus() throws IOException {
         PostFeatureUpgradeRequest request = new PostFeatureUpgradeRequest();
         PostFeatureUpgradeResponse response = highLevelClient().migration().postFeatureUpgrade(request, RequestOptions.DEFAULT);
-        assertThat(response.isAccepted(), equalTo(true));
-        assertThat(response.getFeatures().size(), equalTo(1));
-        PostFeatureUpgradeResponse.Feature feature = response.getFeatures().get(0);
-        assertThat(feature.getFeatureName(), equalTo("security"));
-        assertThat(response.getReason(), nullValue());
-        assertThat(response.getElasticsearchException(), nullValue());
+        assertThat(response.isAccepted(), equalTo(false));
+        assertThat(response.getFeatures(), hasSize(0));
+        assertThat(response.getReason(), equalTo("No system indices require migration"));
     }
 }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MlTestStateCleaner.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlTestStateCleaner.java
index 5cbffb7157ca4..419e9f8790e1a 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MlTestStateCleaner.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MlTestStateCleaner.java
@@ -24,6 +24,7 @@
 /**
  * Cleans up ML resources created during tests
  */
+@SuppressWarnings("removal")
 public class MlTestStateCleaner {
 
     private final Logger logger;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MockRestHighLevelTests.java
index e9812ac68dbeb..6752cec97bd60 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MockRestHighLevelTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MockRestHighLevelTests.java
@@ -27,6 +27,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+@SuppressWarnings("removal")
 public class MockRestHighLevelTests extends ESTestCase {
     private RestHighLevelClient client;
     private static final List<String> WARNINGS = Collections.singletonList("Some Warning");
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java
index bca7f64e9c502..883e891aff7c6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientExtTests.java
@@ -28,6 +28,7 @@
  * This test works against a {@link RestHighLevelClient} subclass that simulates how custom response sections returned by
  * Elasticsearch plugins can be parsed using the high level client.
  */
+@SuppressWarnings("removal")
 public class RestHighLevelClientExtTests extends ESTestCase {
 
     private RestHighLevelClient restHighLevelClient;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index ff7b0d581f46f..51f1d8ae42680 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -160,6 +160,7 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
+@SuppressWarnings("removal")
 public class RestHighLevelClientTests extends ESTestCase {
 
     private static final String SUBMIT_TASK_PREFIX = "submit_";
@@ -236,7 +237,7 @@ public static void mockGetRoot(RestClient restClient, byte[] responseBody, boole
         }
 
         when(restClient
-            .performRequestAsync(argThat(new RequestMatcher("GET", "/")), any()))
+            .performRequestAsync(argThat(new RequestMatcher("GET", "/")::matches), any()))
             .thenAnswer(i -> {
                 ((ResponseListener)i.getArguments()[1]).onSuccess(response);
                 return Cancellable.NO_OP;
@@ -1035,7 +1036,8 @@ private static void doTestProductCompatibilityCheck(
         Build build = new Build(Build.Flavor.DEFAULT, Build.Type.UNKNOWN, "hash", "date", false, version);
         mockGetRoot(restClient, build, setProductHeader);
 
-        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")))).thenReturn(apiResponse);
+        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches)))
+            .thenReturn(apiResponse);
 
         RestHighLevelClient highLevelClient = new RestHighLevelClient(restClient, RestClient::close, Collections.emptyList());
 
@@ -1082,7 +1084,8 @@ public void testProductCompatibilityTagline() throws Exception {
         when(apiStatus.getStatusCode()).thenReturn(200);
         Response apiResponse = mock(Response.class);
         when(apiResponse.getStatusLine()).thenReturn(apiStatus);
-        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")))).thenReturn(apiResponse);
+        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches)))
+            .thenReturn(apiResponse);
 
         RestHighLevelClient highLevelClient = new RestHighLevelClient(restClient, RestClient::close, Collections.emptyList());
 
@@ -1120,7 +1123,8 @@ public void testProductCompatibilityFlavor() throws Exception {
         when(apiStatus.getStatusCode()).thenReturn(200);
         Response apiResponse = mock(Response.class);
         when(apiResponse.getStatusLine()).thenReturn(apiStatus);
-        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")))).thenReturn(apiResponse);
+        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches)))
+            .thenReturn(apiResponse);
 
         RestHighLevelClient highLevelClient = new RestHighLevelClient(restClient, RestClient::close, Collections.emptyList());
 
@@ -1162,10 +1166,11 @@ public void testProductCompatibilityRequestFailure() throws Exception {
         when(apiStatus.getStatusCode()).thenReturn(200);
         Response apiResponse = mock(Response.class);
         when(apiResponse.getStatusLine()).thenReturn(apiStatus);
-        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")))).thenReturn(apiResponse);
+        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches)))
+            .thenReturn(apiResponse);
 
         // Have the verification request fail
-        when(restClient.performRequestAsync(argThat(new RequestMatcher("GET", "/")), any()))
+        when(restClient.performRequestAsync(argThat(new RequestMatcher("GET", "/")::matches), any()))
             .thenAnswer(i -> {
                 ((ResponseListener)i.getArguments()[1]).onFailure(new IOException("Something bad happened"));
                 return Cancellable.NO_OP;
@@ -1194,10 +1199,11 @@ public void testProductCompatibilityWithForbiddenInfoEndpoint() throws Exception
         when(apiStatus.getStatusCode()).thenReturn(200);
         Response apiResponse = mock(Response.class);
         when(apiResponse.getStatusLine()).thenReturn(apiStatus);
-        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")))).thenReturn(apiResponse);
+        when(restClient.performRequest(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches)))
+            .thenReturn(apiResponse);
 
         // Have the info endpoint used for verification return a 403 (forbidden)
-        when(restClient.performRequestAsync(argThat(new RequestMatcher("GET", "/")), any()))
+        when(restClient.performRequestAsync(argThat(new RequestMatcher("GET", "/")::matches), any()))
             .thenAnswer(i -> {
                 StatusLine infoStatus = mock(StatusLine.class);
                 when(apiStatus.getStatusCode()).thenReturn(HttpStatus.SC_FORBIDDEN);
@@ -1220,7 +1226,8 @@ public void testCancellationForwarding() throws Exception {
         mockGetRoot(restClient);
         Cancellable cancellable = mock(Cancellable.class);
 
-        when(restClient.performRequestAsync(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")), any())).thenReturn(cancellable);
+        when(restClient.performRequestAsync(argThat(new RequestMatcher("HEAD", "/foo/_source/bar")::matches), any()))
+            .thenReturn(cancellable);
 
         Cancellable result = restHighLevelClient.existsSourceAsync(
             new GetSourceRequest("foo", "bar"),
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java
index c659c487bfcd3..97b0194a7fc68 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java
@@ -71,6 +71,7 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
 
+@SuppressWarnings("removal")
 public class RollupIT extends ESRestHighLevelClientTestCase {
 
     double sum = 0.0d;
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java
index 5bb80056b2fa3..69fe432e6334d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java
@@ -49,6 +49,7 @@ import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.notNullValue;
 
+@SuppressWarnings("removal")
 public class SearchableSnapshotsIT extends ESRestHighLevelClientTestCase {
 
     @Before
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java
index 8cdb2fafe204b..8127b420da167 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java
@@ -51,6 +51,7 @@ import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.nullValue;
 
+@SuppressWarnings("removal")
 public class SecurityIT extends ESRestHighLevelClientTestCase {
 
     public void testPutUser() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TextStructureIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TextStructureIT.java
index 9d684917c9718..2c94e56f90d79 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TextStructureIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TextStructureIT.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.client.textstructure.FindStructureResponse;
 import org.elasticsearch.client.textstructure.structurefinder.TextStructure;
 
+@SuppressWarnings("removal")
 public class TextStructureIT extends ESRestHighLevelClientTestCase {
 
     public void testFindFileStructure() throws IOException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformIT.java
index d43b6d133071b..4c31be8bb57cc 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TransformIT.java
@@ -74,6 +74,7 @@ import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.oneOf;
 
+@SuppressWarnings("removal")
 public class TransformIT extends ESRestHighLevelClientTestCase {
 
     private List<String> transformsToClean = new ArrayList<>();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/AsyncSearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/AsyncSearchDocumentationIT.java
index 21ae54131c541..dc2673f786cb9 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/AsyncSearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/AsyncSearchDocumentationIT.java
@@ -33,6 +33,7 @@
  * Documentation for Async Search APIs in the high level java client.
  * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
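The RestHighLevelClientTests hunks above replace argThat(new RequestMatcher(...)) with argThat(new RequestMatcher(...)::matches). A plausible reading, given the securemock-to-Mockito dependency changes later in this patch: Mockito 3.x's org.mockito.ArgumentMatcher is a functional interface that no longer extends Hamcrest's Matcher, so a Hamcrest-style matcher object has to be adapted, for example via a method reference to its boolean matches(...) method. A sketch of the pattern using only the public low-level client types (the stubbing itself is illustrative, not code from this patch):

    import java.io.IOException;

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    import static org.mockito.ArgumentMatchers.argThat;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    public class ArgThatSketch {
        public void stubHeadRequest() throws IOException {
            RestClient restClient = mock(RestClient.class);
            Response response = mock(Response.class);

            // Mockito 3.x argThat(...) accepts org.mockito.ArgumentMatcher<T>,
            // a functional interface, so a lambda works; so does a ::matches
            // method reference on a legacy Hamcrest-style matcher object.
            when(restClient.performRequest(argThat(r -> "HEAD".equals(r.getMethod()))))
                .thenReturn(response);
        }
    }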
  */
+@SuppressWarnings("removal")
 public class AsyncSearchDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @Before
     void setUpIndex() throws IOException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
index 36d193f1a9608..0d0684af0e579 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
@@ -58,6 +58,7 @@
 import static org.hamcrest.Matchers.is;
 
+@SuppressWarnings("removal")
 public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @Before
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
index 0f1cc0e9db53b..0b3c2103b243d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
@@ -95,6 +95,7 @@
  * Documentation for CRUD APIs in the high level java client.
  * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
  */
+@SuppressWarnings("removal")
 public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @SuppressWarnings("unused")
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
index 7ee97d6faec1e..dfa0e7b9364d1 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java
@@ -62,6 +62,7 @@
  * Documentation for Cluster APIs in the high level java client.
  * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
  */
+@SuppressWarnings("removal")
 public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testClusterPutSettings() throws IOException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java
index 4bcb3ac0ee4c0..993e76ed7973d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/EnrichDocumentationIT.java
@@ -32,6 +32,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
+@SuppressWarnings("removal")
 public class EnrichDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @After
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java
index e76116cdbef02..9a4531a1cbe24 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/GraphDocumentationIT.java
@@ -26,6 +26,7 @@
 import java.io.IOException;
 import java.util.Collection;
 
+@SuppressWarnings("removal")
 public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {
 
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java
index 9f8e09cd1951d..33f4b45d648ba 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java
@@ -85,6 +85,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 
+@SuppressWarnings("removal")
 public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testPutLifecyclePolicy() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
index dd714574476a7..3dac417e5244c 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java
@@ -55,22 +55,21 @@ import org.elasticsearch.client.indices.DeleteAliasRequest;
 import org.elasticsearch.client.indices.DeleteComposableIndexTemplateRequest;
 import org.elasticsearch.client.indices.DetailAnalyzeResponse;
-import org.elasticsearch.client.indices.FreezeIndexRequest;
+import org.elasticsearch.client.indices.GetComposableIndexTemplateRequest;
+import org.elasticsearch.client.indices.GetComposableIndexTemplatesResponse;
 import org.elasticsearch.client.indices.GetFieldMappingsRequest;
 import org.elasticsearch.client.indices.GetFieldMappingsResponse;
 import org.elasticsearch.client.indices.GetIndexRequest;
 import org.elasticsearch.client.indices.GetIndexResponse;
-import org.elasticsearch.client.indices.GetComposableIndexTemplateRequest;
 import org.elasticsearch.client.indices.GetIndexTemplatesRequest;
 import org.elasticsearch.client.indices.GetIndexTemplatesResponse;
-import org.elasticsearch.client.indices.GetComposableIndexTemplatesResponse;
 import org.elasticsearch.client.indices.GetMappingsRequest;
 import org.elasticsearch.client.indices.GetMappingsResponse;
 import org.elasticsearch.client.indices.IndexTemplateMetadata;
 import org.elasticsearch.client.indices.IndexTemplatesExistRequest;
 import org.elasticsearch.client.indices.PutComponentTemplateRequest;
-import org.elasticsearch.client.indices.PutIndexTemplateRequest;
 import org.elasticsearch.client.indices.PutComposableIndexTemplateRequest;
+import org.elasticsearch.client.indices.PutIndexTemplateRequest;
 import org.elasticsearch.client.indices.PutMappingRequest;
 import org.elasticsearch.client.indices.ReloadAnalyzersRequest;
 import org.elasticsearch.client.indices.ReloadAnalyzersResponse;
@@ -90,13 +89,13 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentFactory;
-import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -108,7 +107,6 @@ import java.util.concurrent.TimeUnit;
 
 import static org.elasticsearch.client.IndicesClientIT.FROZEN_INDICES_DEPRECATION_WARNING;
-import static org.elasticsearch.client.IndicesClientIT.IGNORE_THROTTLED_DEPRECATION_WARNING;
 import static org.elasticsearch.client.IndicesClientIT.LEGACY_TEMPLATE_OPTIONS;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.equalTo;
@@ -135,6 +133,7 @@
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
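The javadoc of these *DocumentationIT classes refers to the convention the docs build relies on: code between // tag:: and // end:: comments is pulled verbatim into the reference documentation, which is why snippet lines are kept under 84 characters. A standalone sketch of that convention with a hypothetical tag name (the removed testFreezeIndex method below shows the real pattern at scale):

    import org.apache.http.HttpHost;

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class DocSnippetSketch {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // tag::root-request
                Request request = new Request("GET", "/");          // <1>
                Response response = client.performRequest(request); // <2>
                // end::root-request
                // Only the lines between the tag/end markers are rendered in the
                // docs, pulled in by an include-tagged:: directive referencing
                // the tag name; the <1>/<2> callouts become doc annotations.
                System.out.println(response.getStatusLine());
            }
        }
    }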
  * (the code indentation of the tag is not included in the width)
  */
+@SuppressWarnings("removal")
 public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testIndicesExist() throws IOException {
@@ -2873,89 +2872,6 @@ public void onFailure(Exception e) {
 
     }
 
-    public void testFreezeIndex() throws Exception {
-        RestHighLevelClient client = highLevelClient();
-
-        {
-            CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT);
-            assertTrue(createIndexResponse.isAcknowledged());
-        }
-
-        {
-            // tag::freeze-index-request
-            FreezeIndexRequest request = new FreezeIndexRequest("index"); // <1>
-            // end::freeze-index-request
-
-            // tag::freeze-index-request-timeout
-            request.setTimeout(TimeValue.timeValueMinutes(2)); // <1>
-            // end::freeze-index-request-timeout
-            // tag::freeze-index-request-masterTimeout
-            request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1>
-            // end::freeze-index-request-masterTimeout
-            // tag::freeze-index-request-waitForActiveShards
-            request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <1>
-            // end::freeze-index-request-waitForActiveShards
-
-            // tag::freeze-index-request-indicesOptions
-            request.setIndicesOptions(IndicesOptions.strictExpandOpen()); // <1>
-            // end::freeze-index-request-indicesOptions
-
-            final RequestOptions freezeIndexOptions = RequestOptions.DEFAULT.toBuilder()
-                .setWarningsHandler(
-                    warnings -> List.of(FROZEN_INDICES_DEPRECATION_WARNING, IGNORE_THROTTLED_DEPRECATION_WARNING).equals(warnings) == false
-                ).build();
-
-            // tag::freeze-index-execute
-            ShardsAcknowledgedResponse openIndexResponse = client.indices().freeze(request, freezeIndexOptions);
-            // end::freeze-index-execute
-
-            // tag::freeze-index-response
-            boolean acknowledged = openIndexResponse.isAcknowledged(); // <1>
-            boolean shardsAcked = openIndexResponse.isShardsAcknowledged(); // <2>
-            // end::freeze-index-response
-            assertTrue(acknowledged);
-            assertTrue(shardsAcked);
-
-            // tag::freeze-index-execute-listener
-            ActionListener<ShardsAcknowledgedResponse> listener =
-                new ActionListener<ShardsAcknowledgedResponse>() {
-                    @Override
-                    public void onResponse(ShardsAcknowledgedResponse freezeIndexResponse) {
-                        // <1>
-                    }
-
-                    @Override
-                    public void onFailure(Exception e) {
-                        // <2>
-                    }
-                };
-            // end::freeze-index-execute-listener
-
-            // Replace the empty listener by a blocking listener in test
-            final CountDownLatch latch = new CountDownLatch(1);
-            listener = new LatchedActionListener<>(listener, latch);
-
-            // tag::freeze-index-execute-async
-            client.indices().freezeAsync(request, RequestOptions.DEFAULT, listener); // <1>
-            // end::freeze-index-execute-async
-
-            assertTrue(latch.await(30L, TimeUnit.SECONDS));
-        }
-
-        {
-            // tag::freeze-index-notfound
-            try {
-                FreezeIndexRequest request = new FreezeIndexRequest("does_not_exist");
-                client.indices().freeze(request, RequestOptions.DEFAULT);
-            } catch (ElasticsearchException exception) {
-                if (exception.status() == RestStatus.BAD_REQUEST) {
-                    // <1>
-                }
-            }
-            // end::freeze-index-notfound
-        }
-    }
-
     public void testUnfreezeIndex() throws Exception {
         RestHighLevelClient client = highLevelClient();
 
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
index 18958045ebcf1..d7ba8113f00b3 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
@@ -54,6 +54,7 @@
  * than 84, the line will be cut and a horizontal scroll bar will be displayed.
  * (the code indentation of the tag is not included in the width)
  */
+@SuppressWarnings("removal")
 public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testPutPipeline() throws IOException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java
index aefefd210ca09..f6a39abfe2b14 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java
@@ -49,6 +49,7 @@
 * Documentation for Licensing APIs in the high level java client.
 * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
 */
+@SuppressWarnings("removal")
 public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @BeforeClass
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java
index 7987f33d0eebb..44b0e9e30be9f 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationClientDocumentationIT.java
@@ -42,6 +42,7 @@
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
+@SuppressWarnings("removal")
 public class MigrationClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testGetDeprecationInfo() throws IOException, InterruptedException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java
index 877caf3980d92..9fce69bafca37 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java
@@ -44,6 +44,7 @@
 * include-tagged::{doc-tests}/MigrationDocumentationIT.java[example]
 * --------------------------------------------------
 */
+@SuppressWarnings("removal")
 public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
     public void testClusterHealth() throws IOException {
         RestHighLevelClient client = highLevelClient();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java
index 5f4da866650a6..977a4c4c4b443 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java
@@ -38,6 +38,7 @@
 * Documentation for miscellaneous APIs in the high level java client.
 * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
 */
+@SuppressWarnings("removal")
 public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testMain() throws IOException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
index 6d6a230783354..b41e40122c3fa 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java
@@ -235,6 +235,7 @@ import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.core.Is.is;
 
+@SuppressWarnings("removal")
 public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     private static final RequestOptions POST_DATA_OPTIONS = RequestOptions.DEFAULT.toBuilder()
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
index c376ba03eaf00..ba8e683f33333 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java
@@ -71,6 +71,7 @@ import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.oneOf;
 
+@SuppressWarnings("removal")
 public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @Before
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
index cf937f1f78a71..c8016be8168bf 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
@@ -122,6 +122,7 @@
 * Documentation for search APIs in the high level java client.
 * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
 */
+@SuppressWarnings("removal")
 public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @SuppressWarnings({"unused", "unchecked"})
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchableSnapshotsDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchableSnapshotsDocumentationIT.java
index 54622027d3760..05e2289390c65 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchableSnapshotsDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchableSnapshotsDocumentationIT.java
@@ -41,6 +41,7 @@
 import static org.hamcrest.Matchers.is;
 
+@SuppressWarnings("removal")
 public class SearchableSnapshotsDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testMountSnapshot() throws IOException, InterruptedException {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
index 987a6569fed2a..1d4f3999d44c6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
@@ -158,6 +158,7 @@ import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
 
+@SuppressWarnings("removal")
 public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @Override
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
index ed0e9b0ca941a..5383333b63e34 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java
@@ -80,6 +80,7 @@
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
+@SuppressWarnings("removal")
 public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     private static final String repositoryName = "test_repository";
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java
index 88f2e49414ed5..d6bcf973e3a94 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/StoredScriptsDocumentationIT.java
@@ -55,6 +55,7 @@
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
+@SuppressWarnings("removal")
 public class StoredScriptsDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @SuppressWarnings("unused")
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java
index 73c217369c3ef..bf5b2320653f5 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java
@@ -53,6 +53,7 @@
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
+@SuppressWarnings("removal")
 public class TasksClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     @SuppressWarnings("unused")
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TextStructureClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TextStructureClientDocumentationIT.java
index f6fd9bd6f49d7..5e6a8233f3785 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TextStructureClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TextStructureClientDocumentationIT.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.client.textstructure.FindStructureResponse;
 import org.elasticsearch.client.textstructure.structurefinder.TextStructure;
 
+@SuppressWarnings("removal")
 public class TextStructureClientDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testFindStructure() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java
index e9bebf3dc786a..acb31798d6586 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TransformDocumentationIT.java
@@ -66,6 +66,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 
+@SuppressWarnings("removal")
 public class TransformDocumentationIT extends ESRestHighLevelClientTestCase {
 
     private List<String> transformsToClean = new ArrayList<>();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java
index a2a5d0680409e..64de29bee3ad4 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java
@@ -49,6 +49,7 @@
 import static org.hamcrest.Matchers.is;
 
+@SuppressWarnings("removal")
 public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
 
     public void testStartStopWatchService() throws Exception {
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComponentTemplatesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComponentTemplatesResponseTests.java
index 8099370e02fa9..5f5070d3f4a11 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComponentTemplatesResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComponentTemplatesResponseTests.java
@@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.Template;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.test.ESTestCase;
 
@@ -79,7 +80,7 @@ private static void toXContent(GetComponentTemplatesResponse response, XContentB
             builder.startObject();
             builder.field("name", e.getKey());
             builder.field("component_template");
-            e.getValue().toXContent(builder, null);
+            e.getValue().toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.endObject();
         }
         builder.endArray();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComposableIndexTemplatesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComposableIndexTemplatesResponseTests.java
index b93af425dd932..a241fb235b2b6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComposableIndexTemplatesResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetComposableIndexTemplatesResponseTests.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
@@ -53,7 +54,7 @@ private static void toXContent(GetComposableIndexTemplatesResponse response, XCo
             builder.startObject();
             builder.field("name", e.getKey());
             builder.field("index_template");
-            e.getValue().toXContent(builder, null);
+            e.getValue().toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.endObject();
         }
         builder.endArray();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java
index f6483dd1651b5..7b3bb7cf6004b 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java
@@ -40,10 +40,14 @@ protected org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStat
                 randomAlphaOfLengthBetween(3, 20),
                 randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()),
                 randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()),
-                randomList(4,
-                    () -> new org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexVersion(
+                randomList(
+                    4,
+                    () -> new org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexInfo(
                         randomAlphaOfLengthBetween(3, 20),
-                        randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())))
+                        randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()),
+                        null
+                    )
+                )
             )),
             randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values())
         );
 
@@ -78,12 +82,12 @@ protected void assertInstances(
             assertThat(clientStatus.getIndexVersions(), hasSize(serverTestStatus.getIndexVersions().size()));
 
             for (int j = 0; j < clientStatus.getIndexVersions().size(); j++) {
-                org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexVersion serverIndexVersion
+                org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexInfo serverIndexInfo
                     = serverTestStatus.getIndexVersions().get(j);
                 GetFeatureUpgradeStatusResponse.IndexVersion clientIndexVersion = clientStatus.getIndexVersions().get(j);
-                assertThat(clientIndexVersion.getIndexName(), equalTo(serverIndexVersion.getIndexName()));
-                assertThat(clientIndexVersion.getVersion(), equalTo(serverIndexVersion.getVersion().toString()));
+                assertThat(clientIndexVersion.getIndexName(), equalTo(serverIndexInfo.getIndexName()));
+                assertThat(clientIndexVersion.getVersion(), equalTo(serverIndexInfo.getVersion().toString()));
             }
         }
     }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpgradeTransformsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpgradeTransformsResponseTests.java
index 11a4b2c77fa31..e810e36709541 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpgradeTransformsResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/transform/UpgradeTransformsResponseTests.java
@@ -37,15 +37,9 @@ private static UpgradeTransformsResponse createTestInstance() {
 
     private static void toXContent(UpgradeTransformsResponse response, XContentBuilder builder) throws IOException {
         builder.startObject();
-        if (response.getUpdated() != 0) {
-            builder.field("updated", response.getUpdated());
-        }
-        if (response.getNoAction() != 0) {
-            builder.field("no_action", response.getNoAction());
-        }
-        if (response.getNeedsUpdate() != 0) {
-            builder.field("needs_update", response.getNeedsUpdate());
-        }
+        builder.field("updated", response.getUpdated());
+        builder.field("no_action", response.getNoAction());
+        builder.field("needs_update", response.getNeedsUpdate());
         builder.endObject();
     }
 
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
index 3fa58b997c9ef..4fc00d0b6589e 100644
--- a/client/rest/build.gradle
+++ b/client/rest/build.gradle
@@ -44,7 +44,6 @@ dependencies {
   testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
   testImplementation "junit:junit:${versions.junit}"
   testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
-  testImplementation "org.elasticsearch:securemock:${versions.securemock}"
   testImplementation "org.elasticsearch:mocksocket:${versions.mocksocket}"
 }
 
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java
index fe5152d956db8..cfe313c744c52 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/Response.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java
@@ -136,14 +136,14 @@ public HttpEntity getEntity() {
 
     /**
      * Tests if a string matches the RFC 7234 specification for warning headers.
-     * This assumes that the warn code is always 299 and the warn agent is always
-     * Elasticsearch.
+     * This assumes that the warn code is always 299 or 300 and the warn agent is
+     * always Elasticsearch.
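Per the updated javadoc, the client now treats both 299 and 300 warn codes from an Elasticsearch warn agent as warning headers. A standalone sketch of the prefix check with sample RFC 7234-shaped values; the header strings are made up for illustration:

    public class WarningHeaderSketch {

        // Mirrors the prefix-based check described above: warn-code 299 or 300
        // followed by an "Elasticsearch-" warn agent.
        static boolean isElasticsearchWarning(String header) {
            return header.startsWith("299 Elasticsearch-") || header.startsWith("300 Elasticsearch-");
        }

        public static void main(String[] args) {
            // RFC 7234 warning header shape: warn-code SP warn-agent SP warn-text
            System.out.println(isElasticsearchWarning(
                "299 Elasticsearch-8.0.0 \"this request accesses a deprecated API\"")); // true
            System.out.println(isElasticsearchWarning(
                "300 Elasticsearch-8.0.0 \"example non-standard warn code\""));         // true
            System.out.println(isElasticsearchWarning(
                "299 nginx \"a warning from some other agent\""));                      // false
        }
    }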
* * @param s the value of a warning header formatted according to RFC 7234 * @return {@code true} if the input string matches the specification */ private static boolean matchWarningHeaderPatternByPrefix(final String s) { - return s.startsWith("299 Elasticsearch-"); + return s.startsWith("299 Elasticsearch-") || s.startsWith("300 Elasticsearch-"); } /** diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index e15e4cb239938..7846758fc087b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -54,7 +54,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.stubbing.Answer; -import javax.net.ssl.SSLHandshakeException; import java.io.IOException; import java.io.PrintWriter; import java.io.StringWriter; @@ -72,6 +71,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; +import javax.net.ssl.SSLHandshakeException; import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; @@ -87,6 +87,7 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -124,7 +125,7 @@ public void createRestClient() { static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); when(httpClient.execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer((Answer>) invocationOnMock -> { + any(HttpClientContext.class), nullable(FutureCallback.class))).thenAnswer((Answer>) invocationOnMock -> { final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; @@ -202,7 +203,7 @@ public void testInternalHttpRequest() throws Exception { for (String httpMethod : getHttpMethods()) { HttpUriRequest expectedRequest = performRandomRequest(httpMethod); verify(httpClient, times(++times)).execute(requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), any(HttpClientContext.class), any(FutureCallback.class)); + any(HttpAsyncResponseConsumer.class), any(HttpClientContext.class), nullable(FutureCallback.class)); HttpUriRequest actualRequest = (HttpUriRequest)requestArgumentCaptor.getValue().generateRequest(); assertEquals(expectedRequest.getURI(), actualRequest.getURI()); assertEquals(expectedRequest.getClass(), actualRequest.getClass()); diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index a04984ecb5937..6d673f5e986fd 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -42,7 +42,6 @@ dependencies { testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testImplementation "junit:junit:${versions.junit}" - testImplementation "org.elasticsearch:securemock:${versions.securemock}" testImplementation 
"org.elasticsearch:mocksocket:${versions.mocksocket}" } diff --git a/client/test/build.gradle b/client/test/build.gradle index ec36bfe796b0a..dfa2e9cf88be1 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -23,6 +23,11 @@ dependencies { api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" + + // mockito + api 'org.mockito:mockito-core:3.12.4' + api 'net.bytebuddy:byte-buddy:1.11.13' + api 'org.objenesis:objenesis:3.2' } tasks.named('forbiddenApisMain').configure { @@ -38,6 +43,9 @@ tasks.named('forbiddenApisTest').configure { replaceSignatureFiles 'jdk-signatures' } +// since this client implementation is going away, third party audit is pointless +tasks.named("thirdPartyAudit").configure { enabled = false } + // JarHell is part of es server, which we don't want to pull in // TODO: Not anymore. Now in :libs:elasticsearch-core tasks.named("jarHell").configure { enabled = false } diff --git a/distribution/packages/src/common/scripts/postinst b/distribution/packages/src/common/scripts/postinst index a2b4c2930851f..5a71fba567869 100644 --- a/distribution/packages/src/common/scripts/postinst +++ b/distribution/packages/src/common/scripts/postinst @@ -72,33 +72,44 @@ if [ "x$IS_UPGRADE" != "xtrue" ]; then ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/security-cli \ /usr/share/elasticsearch/bin/elasticsearch-cli); then - echo "########## Security autoconfiguration information ############" - echo "# #" - echo "# Authentication and Authorization are enabled. #" - echo "# TLS for the transport and the http layers is enabled and configured. #" - echo "# #" - echo "# The password of the elastic superuser will be set to: ${INITIAL_PASSWORD} #" - echo "# upon starting elasticsearch for the first time #" - echo "# #" - echo "##############################################################################" + echo "--------------------------- Security autoconfiguration information ------------------------------" + echo + echo "Authentication and authorization are enabled." + echo "TLS for the transport and HTTP layers is enabled and configured." + echo + echo "The generated password for the elastic built-in superuser is : ${INITIAL_PASSWORD}" + echo + echo + echo "You can complete the following actions at any time:" + echo + echo "Reset the password of the elastic built-in superuser with " + echo "'/usr/share/bin/elasticsearch-reset-password -u elastic'." + echo + echo "Generate an enrollment token for Kibana instances with " + echo " 'bin/elasticsearch-create-enrollment-token -s kibana'." + echo + echo "Generate an enrollment token for Elasticsearch nodes with " + echo "'bin/elasticsearch-create-enrollment-token -s node'." + echo + echo "-------------------------------------------------------------------------------------------------" fi else if [ $? -eq 80 ]; then # ExitCodes.NOOP - echo "########## Security autoconfiguration information ############" - echo "# #" - echo "# Security features appear to be already configured. #" - echo "# #" - echo "##############################################################################" + echo "--------------------------- Security autoconfiguration information ------------------------------" + echo + echo "Skipping auto-configuration because security features appear to be already configured." 
+ echo + echo "-------------------------------------------------------------------------------------------------" else - echo "########## Security autoconfiguration information ############" - echo "# #" - echo "# Failed to auto-configure security features. #" - echo "# Authentication and Authorization are enabled. #" - echo "# You can use elasticsearch-reset-elastic-password to set a password #" - echo "# for the elastic user. #" - echo "# #" - echo "##############################################################################" + echo "--------------------------- Security autoconfiguration information ------------------------------" + echo + echo "Failed to auto-configure security features." + echo "However, authentication and authorization are still enabled." + echo + echo "You can reset the password of the elastic built-in superuser with " + echo "'/usr/share/bin/elasticsearch-reset-password -u elastic' at any time." + echo "-------------------------------------------------------------------------------------------------" fi fi if command -v systemctl >/dev/null; then diff --git a/distribution/src/bin/elasticsearch b/distribution/src/bin/elasticsearch index 59aabfc3ec368..7ba208a431e81 100755 --- a/distribution/src/bin/elasticsearch +++ b/distribution/src/bin/elasticsearch @@ -56,7 +56,7 @@ if [[ $ATTEMPT_SECURITY_AUTO_CONFIG = true ]]; then if ES_MAIN_CLASS=org.elasticsearch.xpack.security.cli.ConfigInitialNode \ ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/security-cli \ - "`dirname "$0"`"/elasticsearch-cli "$@" <<<"$KEYSTORE_PASSWORD"; then + bin/elasticsearch-cli "$@" <<<"$KEYSTORE_PASSWORD"; then : else retval=$? diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index b95585ea13e43..2038cc5e2efd7 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -54,16 +54,6 @@ endif::[] :javadoc-license: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/license :javadoc-watcher: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/watcher -/////// -Permanently unreleased branches (master, n.X) -/////// -ifeval::["{source_branch}"=="master"] -:permanently-unreleased-branch: -endif::[] -ifeval::["{source_branch}"=="{major-version}"] -:permanently-unreleased-branch: -endif::[] - /////// Shared attribute values are pulled from elastic/docs /////// diff --git a/docs/java-rest/redirects.asciidoc b/docs/java-rest/redirects.asciidoc index a077102b405d4..37f331a87bcef 100644 --- a/docs/java-rest/redirects.asciidoc +++ b/docs/java-rest/redirects.asciidoc @@ -47,3 +47,11 @@ See <>. === Stop {transform} API See <>. + +[role="exclude",id="java-rest-high-freeze-index"] +=== Freeze index API + +The freeze index API was removed in 8.0. Frozen indices are no longer useful due +to +https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent +improvements in heap memory usage]. diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 4658237602d7a..50711023f93ba 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -28,6 +28,7 @@ include::install_remove.asciidoc[] | `indexed_chars_field` | no | `null` | Field name from which you can overwrite the number of chars being used for extraction. See `indexed_chars`. | `properties` | no | all properties | Array of properties to select to be stored. 
Can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language` | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +| `remove_binary` | no | `false` | If `true`, the binary `field` will be removed from the document | `resource_name` | no | | Field containing the name of the resource to decode. If specified, the processor passes this resource name to the underlying Tika library to enable https://tika.apache.org/1.24.1/detection.html#Resource_Name_Based_Detection[Resource Name Based Detection]. |====== @@ -94,6 +95,9 @@ The document's `attachment` object contains extracted properties for the file: ---- // TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] +NOTE: Keeping the binary as a field within the document might consume a lot of resources. It is highly recommended + to remove that field from the document. Set `remove_binary` to `true` to automatically remove the field. + To extract only certain `attachment` fields, specify the `properties` array: [source,console] diff --git a/docs/reference/aggregations/bucket/categorize-text-aggregation.asciidoc b/docs/reference/aggregations/bucket/categorize-text-aggregation.asciidoc index cc0a0e787f844..c6e191fb23539 100644 --- a/docs/reference/aggregations/bucket/categorize-text-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/categorize-text-aggregation.asciidoc @@ -8,7 +8,8 @@ experimental::[] A multi-bucket aggregation that groups semi-structured text into buckets. Each `text` field is re-analyzed using a custom analyzer. The resulting tokens are then categorized creating buckets of similarly formatted -text values. This aggregation works best with machine generated text like system logs. +text values. This aggregation works best with machine-generated text like system logs. Only the first 100 analyzed +tokens are used to categorize the text. NOTE: If you have considerable memory allocated to your JVM but are receiving circuit breaker exceptions from this aggregation, you may be attempting to categorize text that is poorly formatted for categorization. Consider @@ -27,11 +28,13 @@ The semi-structured text field to categorize. The maximum number of unique tokens at any position up to `max_matched_tokens`. Must be larger than 1. Smaller values use less memory and create fewer categories. Larger values will use more memory and create narrower categories. +The maximum allowed value is `100`. `max_matched_tokens`:: (Optional, integer, default: `5`) The maximum number of token positions to match on before attempting to merge categories. Larger values will use more memory and create narrower categories. +The maximum allowed value is `100`.
Example: `max_matched_tokens` of 2 would disallow merging of the categories @@ -90,7 +93,6 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=tokenizer] (array of strings or objects) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=filter] ===== -end::categorization-analyzer[] `shard_size`:: (Optional, integer) diff --git a/docs/reference/aggregations/metrics/rate-aggregation.asciidoc b/docs/reference/aggregations/metrics/rate-aggregation.asciidoc index cb1f903f6443c..c99dffc8d8b5c 100644 --- a/docs/reference/aggregations/metrics/rate-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/rate-aggregation.asciidoc @@ -7,7 +7,7 @@ ++++ A `rate` metrics aggregation can be used only inside a `date_histogram` or `composite` aggregation. It calculates a rate of documents -or a field in each bucket. The field values can be generated extracted from specific numeric or +or a field in each bucket. The field values can be extracted from specific numeric or <> in the documents. NOTE: For `composite` aggregations, there must be exactly one `date_histogram` source for the `rate` aggregation to be supported. @@ -27,7 +27,7 @@ A `rate` aggregation looks like this in isolation: -------------------------------------------------- // NOTCONSOLE -The following request will group all sales records into monthly bucket and than convert the number of sales transaction in each bucket +The following request will group all sales records into monthly buckets and then convert the number of sales transactions in each bucket into per annual sales rate. [source,console] -------------------------------------------------- GET sales/_search <1> Histogram is grouped by month. <2> But the rate is converted into annual rate. -The response will return the annual rate of transaction in each bucket. Since there are 12 months per year, the annual rate will -be automatically calculated by multiplying monthly rate by 12. +The response will return the annual rate of transactions in each bucket. Since there are 12 months per year, the annual rate will +be automatically calculated by multiplying the monthly rate by 12. [source,console-result] -------------------------------------------------- diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 030732b997340..7bd6bb6f0e85a 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -127,7 +127,7 @@ return irrelevant results. For example, if the `max_gram` is `3` and search terms are truncated to three characters, the search term `apple` is shortened to `app`. This means searches -for `apple` return any indexed terms matching `app`, such as `apply`, `snapped`, +for `apple` return any indexed terms matching `app`, such as `apply`, `approximate`, and `apple`. We recommend testing both approaches to see which best fits your diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index e958a29d75bbc..92273b15e2e5e 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -97,6 +97,11 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] provided or better, i.e. `green` > `yellow` > `red`. By default, will not wait for any status.
+`return_200_for_cluster_health_timeout`:: + (Optional, Boolean) If `true`, returns an HTTP `200` status code instead of + `408` when the request times out on the server side while waiting for the + requested cluster health status. Defaults to `false`. + [[cluster-health-api-response-body]] ==== {api-response-body-title} diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc index 086ee00f49690..3aa564d1a4017 100644 --- a/docs/reference/commands/index.asciidoc +++ b/docs/reference/commands/index.asciidoc @@ -13,7 +13,7 @@ tasks from the command line: * <> * <> * <> -* <> +* <> * <> * <> * <> @@ -28,7 +28,7 @@ include::create-enrollment-token.asciidoc[] include::croneval.asciidoc[] include::keystore.asciidoc[] include::node-tool.asciidoc[] -include::reset-elastic-password.asciidoc[] +include::reset-password.asciidoc[] include::saml-metadata.asciidoc[] include::service-tokens-command.asciidoc[] include::setup-passwords.asciidoc[] diff --git a/docs/reference/commands/reset-elastic-password.asciidoc b/docs/reference/commands/reset-password.asciidoc similarity index 50% rename from docs/reference/commands/reset-elastic-password.asciidoc rename to docs/reference/commands/reset-password.asciidoc index a440a43844fc9..012874fd61171 100644 --- a/docs/reference/commands/reset-elastic-password.asciidoc +++ b/docs/reference/commands/reset-password.asciidoc @@ -1,45 +1,49 @@ [roles="xpack"] -[[reset-elastic-password]] -== elasticsearch-reset-elastic-password +[[reset-password]] +== elasticsearch-reset-password + +The `elasticsearch-reset-password` command resets the passwords of users in +the native realm and built-in users. -The `elasticsearch-reset-elastic-password` command resets the password for the -`elastic` <>. [discrete] === Synopsis [source,shell] ---- -bin/elasticsearch-reset-elastic-password +bin/elasticsearch-reset-password [-a, --auto] [-b, --batch] [-E > to run the request -that changes the `elastic` user password. +Use this command to reset the password of any user in the native realm +or any built-in user. By default, a strong password is generated for you. +To explicitly set a password, run the tool in interactive mode with `-i`. +The command generates (and subsequently removes) a temporary user in the +<> to run the request that changes the user password. + IMPORTANT: You cannot use this tool if the file realm is disabled in your `elasticsearch.yml` file. This command uses an HTTP connection to connect to the cluster and run the user management requests. The command automatically attempts to establish the connection over HTTPS by using the `xpack.security.http.ssl` settings in -the `elasticsearch.yml` file. If you do not use the default config directory +the `elasticsearch.yml` file. If you do not use the default configuration directory location, ensure that the `ES_PATH_CONF` environment variable returns the -correct path before you run the `elasticsearch-reset-elastic-password` command. You can +correct path before you run the `elasticsearch-reset-password` command. You can override settings in your `elasticsearch.yml` file by using the `-E` command option. For more information about debugging connection failures, see <>. [discrete] -[[reset-elastic-password-parameters]] === Parameters -`-a, --auto`:: Resets the password of the `elastic` user to an auto-generated strong password. (Default) + +`-a, --auto`:: Resets the password of the specified user to an auto-generated strong password.
(Default) `-b, --batch`:: Runs the reset password process without prompting the user for verification. @@ -49,8 +53,13 @@ option. For more information about debugging connection failures, see `-h, --help`:: Returns all of the command parameters. -`-i, --interactive`:: Prompts the user for the password of the `elastic` user. Use this option to explicitly set a password. +`-i, --interactive`:: Prompts for the password of the specified user. Use this option to explicitly set a password. + +`-s, --silent`:: Shows minimal output in the console. + +`-u, --username`:: The username of the native realm user or built-in user. +`-v, --verbose`:: Shows verbose output in the console. [discrete] === Examples @@ -59,5 +68,13 @@ prints the new password in the console. [source,shell] ---- -bin/elasticsearch-reset-elastic-password +bin/elasticsearch-reset-password -u elastic +---- + +The following example resets the password of a native user with username `user1` after prompting +in the terminal for the desired password: + +[source,shell] +---- +bin/elasticsearch-reset-password --username user1 -i ---- diff --git a/docs/reference/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc index 7a443b492d470..2e34575af3296 100644 --- a/docs/reference/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -3,6 +3,8 @@ [[setup-passwords]] == elasticsearch-setup-passwords +deprecated[8.0, "The `elasticsearch-setup-passwords` tool is deprecated and will be removed in a future release. To manually reset the password for the built-in users (including the `elastic` user), use the <> tool, the {es} change password API, or the User Management features in {kib}."] + The `elasticsearch-setup-passwords` command sets the passwords for the <>. diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 28067768298e6..4a74cae5f8bb8 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -514,7 +514,7 @@ index doesn’t grow too large while waiting for the rollover check. By default, {ilm-init} checks rollover conditions every 10 minutes. + -- -The following <> request +The following <> request lowers the `indices.lifecycle.poll_interval` setting to `1m` (one minute). [source,console] ---- @@ -651,8 +651,8 @@ original value when reindexing is complete. This prevents unnecessary load on the master node. + -- -The following update cluster settings API request resets the -`indices.lifecycle.poll_interval` setting to its default value, 10 minutes. +The following cluster update settings API request resets the +`indices.lifecycle.poll_interval` setting to its default value. [source,console] ---- diff --git a/docs/reference/data-streams/data-streams.asciidoc b/docs/reference/data-streams/data-streams.asciidoc index 4e124e9b8055a..25e084e6e7c42 100644 --- a/docs/reference/data-streams/data-streams.asciidoc +++ b/docs/reference/data-streams/data-streams.asciidoc @@ -67,7 +67,6 @@ such as: * <> * <> -* <> * <> * <> diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index cb09ce068f219..8b08a92a1d9f3 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -297,8 +297,7 @@ Removes the specified document from the index.
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bulk-index] `_id`:: -(Required, string) -The document ID. +(Required, string) The document ID. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bulk-require-alias] -- @@ -327,7 +326,8 @@ The following line must contain the partial document and update options. -- include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bulk-index] -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bulk-id] +`_id`:: +(Required, string) The document ID. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bulk-require-alias] -- diff --git a/docs/reference/eql/eql.asciidoc b/docs/reference/eql/eql.asciidoc index da39f837a7d17..c518381520b3b 100644 --- a/docs/reference/eql/eql.asciidoc +++ b/docs/reference/eql/eql.asciidoc @@ -802,7 +802,7 @@ The EQL search API supports <>. However, the local and <> must use the same {es} version. -The following <> request +The following <> request adds two remote clusters: `cluster_one` and `cluster_two`. [source,console] diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index a5679bfe570f8..99b015e4f717a 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -507,7 +507,7 @@ include::{es-repo-dir}/tab-widgets/quick-start-cleanup-widget.asciidoc[] * Use {fleet} and {agent} to collect logs and metrics directly from your data sources and send them to {es}. See the -{fleet-guide}/fleet-quick-start.html[{fleet} quick start guide]. +{observability-guide}/ingest-logs-metrics-uptime.html[Ingest logs, metrics, and uptime data with {agent}]. * Use {kib} to explore, visualize, and manage your {es} data. See the {kibana-ref}/get-started.html[{kib} quick start guide]. diff --git a/docs/reference/how-to/fix-common-cluster-issues.asciidoc b/docs/reference/how-to/fix-common-cluster-issues.asciidoc index 6c9e0ab508265..4924b64947598 100644 --- a/docs/reference/how-to/fix-common-cluster-issues.asciidoc +++ b/docs/reference/how-to/fix-common-cluster-issues.asciidoc @@ -504,7 +504,7 @@ GET _cluster/settings?flat_settings=true&include_defaults=true // TEST[s/^/PUT my-index\n/] You can change the settings using the <> and <> APIs. +settings>> and <> APIs. **Allocate or reduce replicas** diff --git a/docs/reference/how-to/size-your-shards.asciidoc b/docs/reference/how-to/size-your-shards.asciidoc index 3696c9df1bec8..3e92e8f15349c 100644 --- a/docs/reference/how-to/size-your-shards.asciidoc +++ b/docs/reference/how-to/size-your-shards.asciidoc @@ -322,8 +322,8 @@ setting limits the maximum number of open shards for a cluster. This error indicates an action would exceed this limit. If you're confident your changes won't destabilize the cluster, you can -temporarily increase the limit using the <> and retry the action. +temporarily increase the limit using the <> and retry the action. [source,console] ---- diff --git a/docs/reference/ilm/actions/ilm-freeze.asciidoc b/docs/reference/ilm/actions/ilm-freeze.asciidoc deleted file mode 100644 index db413545c0910..0000000000000 --- a/docs/reference/ilm/actions/ilm-freeze.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -[role="xpack"] -[[ilm-freeze]] -=== Freeze - -Phases allowed: cold. - -<> an index. - -deprecated[7.x,"The ILM Freeze action was deprecated in 7.x and will be treated as a no-op in 8.0+."] - -[[ilm-freeze-options]] -==== Options - -None. 
- -[[ilm-freeze-ex]] -==== Example - -[source,console] --------------------------------------------------- -PUT _ilm/policy/my_policy -{ - "policy": { - "phases": { - "cold": { - "actions": { - "freeze" : { } - } - } - } - } -} --------------------------------------------------- - diff --git a/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc b/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc index 428d071c1e36a..fa4a32f8b6a17 100644 --- a/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc +++ b/docs/reference/ilm/actions/ilm-searchable-snapshot.asciidoc @@ -18,7 +18,7 @@ index>> to the frozen tier. In other phases, the action mounts a <> to the corresponding data tier. If the `searchable_snapshot` action is used in the hot phase the subsequent -phases cannot include the `shrink`, `forcemerge`, or `freeze` actions. +phases cannot include the `shrink` or `forcemerge` actions. This action cannot be performed on a data stream's write index. Attempts to do so will fail. To convert the index to a searchable snapshot, first diff --git a/docs/reference/ilm/ilm-actions.asciidoc b/docs/reference/ilm/ilm-actions.asciidoc index 4b3e456d00230..cbe6923e3cd0d 100644 --- a/docs/reference/ilm/ilm-actions.asciidoc +++ b/docs/reference/ilm/ilm-actions.asciidoc @@ -14,9 +14,6 @@ Permanently remove the index. Reduce the number of index segments and purge deleted documents. Makes the index read-only. -<>:: -Freeze the index to minimize its memory footprint. - <>:: Move the index shards to the <> that corresponds to the current {ilm-init} phase. @@ -49,7 +46,6 @@ Ensure that a snapshot exists before deleting the index. include::actions/ilm-allocate.asciidoc[] include::actions/ilm-delete.asciidoc[] include::actions/ilm-forcemerge.asciidoc[] -include::actions/ilm-freeze.asciidoc[] include::actions/ilm-migrate.asciidoc[] include::actions/ilm-readonly.asciidoc[] include::actions/ilm-rollover.asciidoc[] diff --git a/docs/reference/ilm/ilm-index-lifecycle.asciidoc b/docs/reference/ilm/ilm-index-lifecycle.asciidoc index a6f5fd755bf54..9f5e871cb2ad4 100644 --- a/docs/reference/ilm/ilm-index-lifecycle.asciidoc +++ b/docs/reference/ilm/ilm-index-lifecycle.asciidoc @@ -106,7 +106,6 @@ actions in the order listed. - <> - <> - <> - - <> * Frozen - <> * Delete diff --git a/docs/reference/ilm/ilm-overview.asciidoc b/docs/reference/ilm/ilm-overview.asciidoc index bc00a43bcabff..fa65d91d6e7f7 100644 --- a/docs/reference/ilm/ilm-overview.asciidoc +++ b/docs/reference/ilm/ilm-overview.asciidoc @@ -7,8 +7,8 @@ Overview ++++ -You can create and apply {ilm-cap} ({ilm-init}) policies to automatically manage your indices -according to your performance, resiliency, and retention requirements. +You can create and apply {ilm-cap} ({ilm-init}) policies to automatically manage your indices +according to your performance, resiliency, and retention requirements. Index lifecycle policies can trigger actions such as: @@ -17,7 +17,6 @@ size, number of docs, or age. * **Shrink**: Reduces the number of primary shards in an index. * **Force merge**: Triggers a <> to reduce the number of segments in an index's shards. -* **Freeze**: <> an index and makes it read-only. * **Delete**: Permanently remove an index, including all of its data and metadata. @@ -48,9 +47,9 @@ hardware. [IMPORTANT] =========================== -To use {ilm-init}, all nodes in a cluster must run the same version. 
-Although it might be possible to create and apply policies in a mixed-version cluster, +To use {ilm-init}, all nodes in a cluster must run the same version. +Although it might be possible to create and apply policies in a mixed-version cluster, there is no guarantee they will work as intended. Attempting to use a policy that contains actions that aren't -supported on all nodes in a cluster will cause errors. +supported on all nodes in a cluster will cause errors. =========================== diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 650a6239cdbab..335757c8a1813 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -18,7 +18,6 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> -* <> * <> * <> @@ -53,9 +52,9 @@ index settings, aliases, mappings, and index templates. === Index templates: Index templates automatically apply settings, mappings, and aliases to new indices. -They are most often used to configure rolling indices for time series data to -ensure that each new index has the same configuration as the previous one. -The index template associated with a data stream configures its backing indices. +They are most often used to configure rolling indices for time series data to +ensure that each new index has the same configuration as the previous one. +The index template associated with a data stream configures its backing indices. For more information, see <>. * <> @@ -113,7 +112,6 @@ include::indices/indices-exists.asciidoc[] include::indices/field-usage-stats.asciidoc[] include::indices/flush.asciidoc[] include::indices/forcemerge.asciidoc[] -include::indices/apis/freeze.asciidoc[] include::indices/get-alias.asciidoc[] include::indices/get-component-template.asciidoc[] include::indices/get-field-mapping.asciidoc[] diff --git a/docs/reference/indices/apis/freeze.asciidoc b/docs/reference/indices/apis/freeze.asciidoc deleted file mode 100644 index 2a18939fbf1bd..0000000000000 --- a/docs/reference/indices/apis/freeze.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -[role="xpack"] -[testenv="basic"] -[[freeze-index-api]] -=== Freeze index API -++++ -Freeze index -++++ - -// tag::freeze-api-dep[] -deprecated::[7.14, Frozen indices are no longer useful due to https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent improvements in heap memory usage].] -// end::freeze-api-dep[] - -Freezes an index. - -[[freeze-index-api-request]] -==== {api-request-title} - -`POST //_freeze` - -[[freeze-index-api-prereqs]] -==== {api-prereq-title} - -* If the {es} {security-features} are enabled, you must have the `manage` -<> for the target index or index alias. - -[[freeze-index-api-desc]] -==== {api-description-title} - -A frozen index has almost no overhead on the cluster (except for maintaining its -metadata in memory) and is read-only. Read-only indices are blocked for write -operations, such as <> or <>. See <>. - -The current write index on a data stream cannot be frozen. In order to freeze -the current write index, the data stream must first be -<> so that a new write index is created -and then the previous write index can be frozen. - -IMPORTANT: Freezing an index will close the index and reopen it within the same -API call. This causes primaries to not be allocated for a short amount of time -and causes the cluster to go red until the primaries are allocated again. This -limitation might be removed in the future. 
- -[[freeze-index-api-path-parms]] -==== {api-path-parms-title} - -``:: - (Required, string) Identifier for the index. - -[[freeze-index-api-examples]] -==== {api-examples-title} - -The following example freezes and unfreezes an index: - -[source,console] --------------------------------------------------- -POST /my-index-000001/_freeze -POST /my-index-000001/_unfreeze --------------------------------------------------- -// TEST[skip:unable to ignore deprecation warning] -// TEST[s/^/PUT my-index-000001\n/] - diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc index 5176faf8bce97..e9869316dbb72 100644 --- a/docs/reference/indices/apis/unfreeze.asciidoc +++ b/docs/reference/indices/apis/unfreeze.asciidoc @@ -6,7 +6,17 @@ Unfreeze index ++++ -deprecated::[7.14, Frozen indices are no longer useful due to https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent improvements in heap memory usage].] +[WARNING] +.Deprecated in 7.14 +==== +In 8.0, we removed the ability to freeze an index. In previous versions, +freezing an index reduced its memory overhead. However, frozen indices are no +longer useful due to +https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent +improvements in heap memory usage]. +You can use this API to unfreeze indices that were frozen in 7.x. Frozen indices +are not related to the frozen data tier. +==== Unfreezes an index. @@ -25,12 +35,7 @@ Unfreezes an index. ==== {api-description-title} When a frozen index is unfrozen, the index goes through the normal recovery -process and becomes writeable again. See <>. - -IMPORTANT: Freezing an index will close the index and reopen it within the same -API call. This causes primaries to not be allocated for a short amount of time -and causes the cluster to go red until the primaries are allocated again. This -limitation might be removed in the future. +process and becomes writeable again. [[unfreeze-index-api-path-parms]] ==== {api-path-parms-title} @@ -41,11 +46,10 @@ limitation might be removed in the future. [[unfreeze-index-api-examples]] ==== {api-examples-title} -The following example freezes and unfreezes an index: +The following example unfreezes an index: [source,console] -------------------------------------------------- -POST /my-index-000001/_freeze POST /my-index-000001/_unfreeze -------------------------------------------------- // TEST[s/^/PUT my-index-000001\n/] diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc index 0543da9b2a562..5bbb2044ca082 100644 --- a/docs/reference/indices/index-mgmt.asciidoc +++ b/docs/reference/indices/index-mgmt.asciidoc @@ -15,7 +15,7 @@ You'll learn how to: * View and edit index settings. * View mappings and statistics for an index. -* Perform index-level operations, such as refreshes and freezes. +* Perform index-level operations, such as refreshes. * View and manage data streams. * Create index templates to automatically configure new data streams and indices. @@ -43,9 +43,8 @@ Open {kib}'s main menu and click *Stack Management > Index Management*. image::images/index-mgmt/management_index_labels.png[Index Management UI] The *Index Management* page contains an overview of your indices. -Badges indicate if an index is <>, a -<>, or a -<>. +Badges indicate if an index is a <>, a +<>, or <>. Clicking a badge narrows the list to only indices of that type. You can also filter indices using the search bar. 
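As an aside on the unfreeze workflow documented above: indices frozen on 7.x carry the `index.frozen` index setting, so the get settings API can locate any that still need unfreezing. A minimal sketch, assuming an unsecured cluster on `localhost:9200` and the hypothetical index name `my-index-000001`:

[source,shell]
----
# Find indices that still have the 7.x index.frozen setting (sketch;
# a secured deployment needs HTTPS and credentials).
curl -s "localhost:9200/_all/_settings/index.frozen?pretty"

# Unfreeze one of them with the unfreeze API described above
# (hypothetical index name).
curl -s -X POST "localhost:9200/my-index-000001/_unfreeze?pretty"
----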
@@ -68,7 +67,6 @@ indices on the overview page. The menu includes the following actions: * <> * <> * <> -* <> * <> * *Add* <> diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc index 7f9c29dd1bcd3..b946949eb53c2 100644 --- a/docs/reference/indices/resolve.asciidoc +++ b/docs/reference/indices/resolve.asciidoc @@ -18,8 +18,6 @@ PUT /remotecluster-bar-01 PUT /freeze-index -POST /freeze-index/_freeze - PUT /my-index-000001 PUT /freeze-index/_alias/f-alias @@ -110,7 +108,6 @@ The API returns the following response: "f-alias" ], "attributes": [ - "frozen", "open" ] }, diff --git a/docs/reference/ingest.asciidoc b/docs/reference/ingest.asciidoc index b0d6859a051f3..9e4d3dc5244e9 100644 --- a/docs/reference/ingest.asciidoc +++ b/docs/reference/ingest.asciidoc @@ -91,8 +91,10 @@ PUT _ingest/pipeline/my-pipeline === Manage pipeline versions When you create or update a pipeline, you can specify an optional `version` -integer. {es} doesn't use this `version` number internally, but you can use it -to track changes to a pipeline. +integer. You can use this version number with the +<> parameter to conditionally +update the pipeline. When the `if_version` parameter is specified, a successful +update increments the pipeline's version. [source,console] ---- @@ -432,8 +434,7 @@ If you run {agent} standalone, you can apply pipelines using an <> or <> index setting. Alternatively, you can specify the `pipeline` policy setting in your `elastic-agent.yml` -configuration. See {fleet-guide}/run-elastic-agent-standalone.html[Run {agent} -standalone]. +configuration. See {fleet-guide}/install-standalone-elastic-agent.html[Install standalone {agent}s]. [discrete] [[access-source-fields]] diff --git a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc index 26ab8e1afbd58..5701db4ee18bf 100644 --- a/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc +++ b/docs/reference/ingest/apis/enrich/put-enrich-policy.asciidoc @@ -98,6 +98,11 @@ Matches enrich data to incoming documents based on a Matches enrich data to incoming documents based on a <>. For an example, see <>. + +`range`::: +Matches a number, date, or IP address in incoming documents to a range in the +enrich index based on a <>. For an example, +see <>. -- + .Properties of `` diff --git a/docs/reference/ingest/apis/put-pipeline.asciidoc b/docs/reference/ingest/apis/put-pipeline.asciidoc index 7bab977266c07..5b73a7803fdda 100644 --- a/docs/reference/ingest/apis/put-pipeline.asciidoc +++ b/docs/reference/ingest/apis/put-pipeline.asciidoc @@ -48,8 +48,12 @@ PUT _ingest/pipeline/my-pipeline-id [[put-pipeline-api-query-params]] ==== {api-query-parms-title} -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] +`if_version`:: +(Optional, integer) Perform the operation only if the pipeline has this +version. If specified and the update is successful, the pipeline's +version is incremented. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms] [[put-pipeline-api-request-body]] ==== {api-request-body-title} @@ -77,8 +81,8 @@ Processors run sequentially in the order specified. (Optional, integer) Version number used by external systems to track ingest pipelines. + -This parameter is intended for external systems only. {es} does not use or -validate pipeline version numbers. +See the <> parameter above for +how the version attribute is used. 
`_meta`:: (Optional, object) diff --git a/docs/reference/ingest/enrich.asciidoc b/docs/reference/ingest/enrich.asciidoc index 051d4bebcd6d1..0813f45840475 100644 --- a/docs/reference/ingest/enrich.asciidoc +++ b/docs/reference/ingest/enrich.asciidoc @@ -218,9 +218,6 @@ Instead, you can: to delete the previous enrich policy. // end::update-enrich-policy[] -include::geo-match-enrich-policy-type-ex.asciidoc[] -include::match-enrich-policy-type-ex.asciidoc[] - [[ingest-enrich-components]] ==== Enrich components @@ -271,3 +268,7 @@ How often {es} checks whether unused enrich indices can be deleted. Defaults to `enrich.max_concurrent_policy_executions`:: Maximum number of enrich policies to execute concurrently. Defaults to `50`. + +include::geo-match-enrich-policy-type-ex.asciidoc[] +include::match-enrich-policy-type-ex.asciidoc[] +include::range-enrich-policy-type-ex.asciidoc[] \ No newline at end of file diff --git a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc index 9432c9c06dec8..8081d7cafc855 100644 --- a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc @@ -165,6 +165,8 @@ The API returns the following response: -------------------------------------------------- DELETE /_ingest/pipeline/postal_lookup DELETE /_enrich/policy/postal_policy +DELETE /postal_codes +DELETE /users -------------------------------------------------- // TEST[continued] //// diff --git a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc index c289a555b1d5b..69ee5800f1114 100644 --- a/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/match-enrich-policy-type-ex.asciidoc @@ -146,6 +146,8 @@ The API returns the following response: -------------------------------------------------- DELETE /_ingest/pipeline/user_lookup DELETE /_enrich/policy/users-policy +DELETE /my-index-000001 +DELETE /users -------------------------------------------------- // TEST[continued] //// diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index 7ee81e4148a75..98898e2652157 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -358,14 +358,14 @@ docker run -v my/source/dir:/usr/share/nginx/html:ro nginx of each node’s `elasticsearch.yml` file. + By default, {es} checks the endpoint for updates every three days. To use -another polling interval, use the <> to set <>. [[manually-update-geoip-databases]] **Manually update your GeoIP2 databases** -. Use the <> to set +. Use the <> to set `ingest.geoip.downloader.enabled` to `false`. This disables automatic updates that may overwrite your database changes. This also deletes all downloaded databases. diff --git a/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc new file mode 100644 index 0000000000000..af049afc9bb9d --- /dev/null +++ b/docs/reference/ingest/range-enrich-policy-type-ex.asciidoc @@ -0,0 +1,164 @@ +[role="xpack"] +[testenv="basic"] +[[range-enrich-policy-type]] +=== Example: Enrich your data by matching a value to a range + +A `range` <> uses a <> to match a number, date, or IP address in incoming documents to a range +of the same type in the enrich index. Matching a range to a range is not +supported. 
+ +The following example creates a `range` enrich policy that adds a descriptive network name and +responsible department to incoming documents based on an IP address. It then +adds the enrich policy to a processor in an ingest pipeline. + +Use the <> with the appropriate mappings to create a source index. + +[source,console] +---- +PUT /networks +{ + "mappings": { + "properties": { + "range": { "type": "ip_range" }, + "name": { "type": "keyword" }, + "department": { "type": "keyword" } + } + } +} +---- + +The following index API request indexes a new document to that index. + +[source,console] +---- +PUT /networks/_doc/1?refresh=wait_for +{ + "range": "10.100.0.0/16", + "name": "production", + "department": "OPS" +} +---- +// TEST[continued] + +Use the create enrich policy API to create an enrich policy with the +`range` policy type. This policy must include: + +* One or more source indices +* A `match_field`, +the field from the source indices used to match incoming documents +* Enrich fields from the source indices you'd like to append to incoming +documents + +Since we plan to enrich documents based on an IP address, the policy's +`match_field` must be an `ip_range` field. + +[source,console] +---- +PUT /_enrich/policy/networks-policy +{ + "range": { + "indices": "networks", + "match_field": "range", + "enrich_fields": ["name", "department"] + } +} +---- +// TEST[continued] + +Use the <> to create an +enrich index for the policy. + +[source,console] +---- +POST /_enrich/policy/networks-policy/_execute +---- +// TEST[continued] + + +Use the <> to create an ingest +pipeline. In the pipeline, add an <> that +includes: + +* Your enrich policy. +* The `field` of incoming documents used to match documents +from the enrich index. +* The `target_field` used to store appended enrich data for incoming documents. +This field contains the `match_field` and `enrich_fields` specified in your +enrich policy. + +[source,console] +---- +PUT /_ingest/pipeline/networks_lookup +{ + "processors" : [ + { + "enrich" : { + "description": "Add 'network' data based on 'ip'", + "policy_name": "networks-policy", + "field" : "ip", + "target_field": "network", + "max_matches": "10" + } + } + ] +} +---- +// TEST[continued] + +Use the ingest pipeline to index a document. The incoming document should +include the `field` specified in your enrich processor. + +[source,console] +---- +PUT /my-index-000001/_doc/my_id?pipeline=networks_lookup +{ + "ip": "10.100.34.1" +} +---- +// TEST[continued] + +To verify the enrich processor matched and appended the appropriate field data, +use the <> to view the indexed document. 
+ +[source,console] +---- +GET /my-index-000001/_doc/my_id +---- +// TEST[continued] + +The API returns the following response: + +[source,console-result] +---- +{ + "_index" : "my-index-000001", + "_id" : "my_id", + "_version" : 1, + "_seq_no" : 0, + "_primary_term" : 1, + "found" : true, + "_source" : { + "ip" : "10.100.34.1", + "network" : [ + { + "name" : "production", + "range" : "10.100.0.0/16", + "department" : "OPS" + } + ] + } +} +---- +// TESTRESPONSE[s/"_seq_no": \d+/"_seq_no" : $body._seq_no/ s/"_primary_term":1/"_primary_term" : $body._primary_term/] + +//// +[source,console] +-------------------------------------------------- +DELETE /_ingest/pipeline/networks_lookup +DELETE /_enrich/policy/networks-policy +DELETE /networks +DELETE /my-index-000001 +-------------------------------------------------- +// TEST[continued] +//// diff --git a/docs/reference/mapping/fields/ignored-field.asciidoc b/docs/reference/mapping/fields/ignored-field.asciidoc index 0404d3d3a6f99..5249d2d379a8e 100644 --- a/docs/reference/mapping/fields/ignored-field.asciidoc +++ b/docs/reference/mapping/fields/ignored-field.asciidoc @@ -2,8 +2,10 @@ === `_ignored` field The `_ignored` field indexes and stores the names of every field in a document -that has been ignored because it was malformed and -<> was turned on. +that has been ignored when the document was indexed. This can, for example, +be the case when the field was malformed and <> +was turned on, or when a `keyword` field's value exceeds its optional +<> setting. This field is searchable with <>, <> and <> diff --git a/docs/reference/mapping/params/eager-global-ordinals.asciidoc b/docs/reference/mapping/params/eager-global-ordinals.asciidoc index 27c6f94183134..b09e3eae783c8 100644 --- a/docs/reference/mapping/params/eager-global-ordinals.asciidoc +++ b/docs/reference/mapping/params/eager-global-ordinals.asciidoc @@ -85,13 +85,6 @@ PUT my-index-000001/_mapping ------------ // TEST[continued] -IMPORTANT: On a <>, global ordinals are discarded -after each search and rebuilt again when they're requested. This means that -`eager_global_ordinals` should not be used on frozen indices: it would -cause global ordinals to be reloaded on every search. Instead, the index should -be force-merged to a single segment before being frozen. This avoids building -global ordinals altogether (more details can be found in the next section). - ==== Avoiding global ordinal loading Usually, global ordinals do not present a large overhead in terms of their diff --git a/docs/reference/migration/apis/feature_upgrade.asciidoc b/docs/reference/migration/apis/feature_upgrade.asciidoc index 88cd5d477f4d2..738227c5eae25 100644 --- a/docs/reference/migration/apis/feature_upgrade.asciidoc +++ b/docs/reference/migration/apis/feature_upgrade.asciidoc @@ -24,7 +24,7 @@ and to trigger an automated system upgrade that might potentially involve downti ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` -<> to use this API. (TODO: true?) +<> to use this API.
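The status listings in the examples that follow come from the get feature upgrade status endpoint. A minimal sketch of the call, assuming an unsecured cluster on `localhost:9200` (a secured deployment needs HTTPS and credentials):

[source,shell]
----
# Ask which features still have system indices to migrate before a
# major-version upgrade (sketch; unsecured local cluster assumed).
curl -s "localhost:9200/_migration/system_features?pretty"
----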
[[feature-upgrade-api-example]] ==== {api-examples-title} @@ -46,79 +46,80 @@ Example response: { "feature_name" : "async_search", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "enrich", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "fleet", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "geoip", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "kibana", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "logstash_management", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "machine_learning", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "searchable_snapshots", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "security", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "tasks", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "transform", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] }, { "feature_name" : "watcher", "minimum_index_version" : "8.0.0", - "upgrade_status" : "NO_UPGRADE_NEEDED", + "migration_status" : "NO_MIGRATION_NEEDED", "indices" : [ ] } ], - "upgrade_status" : "NO_UPGRADE_NEEDED" + "migration_status" : "NO_MIGRATION_NEEDED" } -------------------------------------------------- +// TESTRESPONSE[s/"minimum_index_version" : "8.0.0"/"minimum_index_version" : $body.$_path/] This response tells us that Elasticsearch security needs its internal indices upgraded before we can upgrade the cluster to 8.0. @@ -143,6 +144,7 @@ Example response: ] } -------------------------------------------------- +// TESTRESPONSE[skip: can't actually upgrade system indices in these tests] This tells us that the security index is being upgraded. To check the overall status of the upgrade, call the endpoint with GET. 
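A minimal sketch of that flow, assuming an unsecured cluster on `localhost:9200`: trigger the migration with POST, then poll the same endpoint with GET until `migration_status` reports `NO_MIGRATION_NEEDED`.

[source,shell]
----
# Start migrating any system indices that need it (sketch).
curl -s -X POST "localhost:9200/_migration/system_features?pretty"

# Poll the overall status; the migration is finished once
# migration_status reports NO_MIGRATION_NEEDED.
curl -s "localhost:9200/_migration/system_features?pretty" | grep '"migration_status"'
----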
diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index e166a6eb83740..9a31e82f5d537 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -24,8 +24,8 @@ coming[8.0.0] * <> * <> * <> +* <> * <> -* <> * <> * <> * <> @@ -38,6 +38,7 @@ coming[8.0.0] * <> * <> * <> +* <> * <> * <> @@ -127,8 +128,8 @@ include::migrate_8_0/ilm.asciidoc[] include::migrate_8_0/indices.asciidoc[] include::migrate_8_0/ingest.asciidoc[] include::migrate_8_0/java.asciidoc[] +include::migrate_8_0/logging.asciidoc[] include::migrate_8_0/mappings.asciidoc[] -include::migrate_8_0/monitoring.asciidoc[] include::migrate_8_0/network.asciidoc[] include::migrate_8_0/node.asciidoc[] include::migrate_8_0/packaging.asciidoc[] @@ -141,6 +142,7 @@ include::migrate_8_0/security.asciidoc[] include::migrate_8_0/settings.asciidoc[] include::migrate_8_0/snapshots.asciidoc[] include::migrate_8_0/threadpool.asciidoc[] +include::migrate_8_0/transform.asciidoc[] include::migrate_8_0/transport.asciidoc[] include::migrate_8_0/watcher.asciidoc[] include::migrate_8_0/migrate_to_java_time.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/api.asciidoc b/docs/reference/migration/migrate_8_0/api.asciidoc index 13dacdda0dd9e..2c86e0a45442c 100644 --- a/docs/reference/migration/migrate_8_0/api.asciidoc +++ b/docs/reference/migration/migrate_8_0/api.asciidoc @@ -83,10 +83,22 @@ Use {ref}/ml-apis.html#ml-api-datafeed-endpoint[{dfeeds}] instead. [%collapsible] ==== *Details* + -The ability to update a `job_id` in a {dfeed} was deprecated in 7.3.0. and is +The ability to update a `job_id` in a {dfeed} was deprecated in 7.3.0 and is removed in 8.0. *Impact* + It is not possible to move {dfeeds} between {anomaly-jobs}. ==== + +.Create repository and delete repository APIs return a `409` status code instead of `500` when a repository is in use. +[%collapsible] +==== +*Details* + +The {ref}/put-snapshot-repo-api.html[Create or update snapshot repository API] and +{ref}/delete-snapshot-repo-api.html[Delete snapshot repository API] return a `409` +status code instead of `500` when the request attempts to modify an existing repository that is in use. + +*Impact* + +Update client code that handles creation and deletion of repositories to reflect this change. +==== // end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/logging.asciidoc b/docs/reference/migration/migrate_8_0/logging.asciidoc new file mode 100644 index 0000000000000..90f2c104d9325 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/logging.asciidoc @@ -0,0 +1,42 @@ +[discrete] +[[breaking_80_logging_changes]] +==== Logging changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] +.{es} JSON logs now comply with ECS. +[%collapsible] +==== +*Details* + +{es}'s {ref}/logging.html[JSON logs] now comply with the +{ecs-ref}/index.html[Elastic Common Schema (ECS)]. Previously, {es}'s JSON logs +used a custom schema. + +*Impact* + +If your application parses {es}'s JSON logs, update it to support the new ECS +format. +==== + + +.{es} no longer emits deprecation logs or slow logs in plaintext. +[%collapsible] +==== +*Details* + +{es} no longer emits a plaintext version of the following logs: + +* Deprecation logs +* Indexing slow logs +* Search slow logs + +These logs are now only available in JSON.
+ +Server logs are still available in both a JSON and plaintext format. + +*Impact* + +If your application parses {es}'s plaintext logs, update it to use the new ECS +JSON logs. +==== + +// end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/monitoring.asciidoc b/docs/reference/migration/migrate_8_0/monitoring.asciidoc deleted file mode 100644 index cdedfe29982c5..0000000000000 --- a/docs/reference/migration/migrate_8_0/monitoring.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -[discrete] -[[breaking_80_monitoring_changes]] -=== Monitoring changes - -//NOTE: The notable-breaking-changes tagged regions are re-used in the -//Installation and Upgrade Guide - -//tag::notable-breaking-changes[] -.The `use_ingest` setting on Monitoring exporter configurations has been removed. -[%collapsible] -==== -*Details* + -The `xpack.monitoring.exporters.*.use_ingest` property was deprecated in 7.16.0 and -has been removed. This parameter controlled the creation of pipelines for monitoring -indices that previously had no function. - -*Impact* + -Discontinue the use of the `xpack.monitoring.exporters.*.use_ingest` setting. -==== - -.The `index.pipeline.master_timeout` setting on Monitoring HTTP exporter configurations has been removed. -[%collapsible] -==== -*Details* + -The `xpack.monitoring.exporters.*.index.pipeline.master_timeout` property was -deprecated in 7.16.0. This parameter set the timeout when waiting for the remote -Monitoring cluster to create pipelines. Those pipelines for monitoring indices previously -had no function and are now removed in 8.0.0. - -*Impact* + -Discontinue the use of the `xpack.monitoring.exporters.*.index.pipeline.master_timeout` setting. -==== - -.The `index.template.create_legacy_templates` setting on Monitoring HTTP exporter configurations has been removed. -[%collapsible] -==== -*Details* + -The `xpack.monitoring.exporters.*.index.template.create_legacy_templates` property was -deprecated in 7.16.0. This parameter instructed the exporter to install the previous version -of monitoring templates on the monitoring cluster. These older templates were meant to assist -in transitioning to the current monitoring data format. They are currently empty and are no -longer of any use. - -*Impact* + -Discontinue the use of the `xpack.monitoring.exporters.*.index.template.create_legacy_templates` setting. -==== -//end::notable-breaking-changes[] diff --git a/docs/reference/migration/migrate_8_0/scripting.asciidoc b/docs/reference/migration/migrate_8_0/scripting.asciidoc index 04a5ff40a6231..05141efc37435 100644 --- a/docs/reference/migration/migrate_8_0/scripting.asciidoc +++ b/docs/reference/migration/migrate_8_0/scripting.asciidoc @@ -6,14 +6,15 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] -.The `JodaCompatibleDateTime` class has been removed. +.The `JodaCompatibleZonedDateTime` class has been removed. [%collapsible] ==== *Details* + As a transition from Joda datetime to Java datetime, scripting used -an intermediate class called `JodaCompatibleDateTime`. This class has -been removed and is replaced by `ZonedDateTime`. Any use of casting -to a `JodaCompatibleDateTime` in a script will result in a compilation +an intermediate class called `JodaCompatibleZonedDateTime`. This class +has been removed and is replaced by `ZonedDateTime`. 
Any use of casting +to a `JodaCompatibleZonedDateTime` or use of method calls only available +in `JodaCompatibleZonedDateTime` in a script will result in a compilation error, and may not allow the upgraded node to start. *Impact* + @@ -21,6 +22,26 @@ Before upgrading, replace `getDayOfWeek` with `getDayOfWeekEnum().value` in any scripts. Any use of `getDayOfWeek` expecting a return value of `int` will result in a compilation error or runtime error and may not allow the upgraded node to start. + +The following `JodaCompatibleZonedDateTime` methods must be replaced using +`ZonedDateTime` methods prior to upgrade: + +* `getMillis()` -> `toInstant().toEpochMilli()` +* `getCenturyOfEra()` -> `get(ChronoField.YEAR_OF_ERA) / 100` +* `getEra()` -> `get(ChronoField.ERA)` +* `getHourOfDay()` -> `getHour()` +* `getMillisOfDay()` -> `get(ChronoField.MILLI_OF_DAY)` +* `getMillisOfSecond()` -> `get(ChronoField.MILLI_OF_SECOND)` +* `getMinuteOfDay()` -> `get(ChronoField.MINUTE_OF_DAY)` +* `getMinuteOfHour()` -> `getMinute()` +* `getMonthOfYear()` -> `getMonthValue()` +* `getSecondOfDay()` -> `get(ChronoField.SECOND_OF_DAY)` +* `getSecondOfMinute()` -> `getSecond()` +* `getWeekOfWeekyear()` -> `get(DateFormatters.WEEK_FIELDS_ROOT.weekBasedYear())` +* `getYearOfCentury()` -> `get(ChronoField.YEAR_OF_ERA) % 100` +* `getYearOfEra()` -> `get(ChronoField.YEAR_OF_ERA)` +* `toString(String)` -> a DateTimeFormatter +* `toString(String, Locale)` -> a DateTimeFormatter ==== .Stored scripts no longer support empty scripts or search templates. diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc index 9a91b4b503cbc..1ff1580f6e264 100644 --- a/docs/reference/migration/migrate_8_0/security.asciidoc +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -6,6 +6,25 @@ //Installation and Upgrade Guide //tag::notable-breaking-changes[] +[[deprecate-elasticsearch-setup-passwords]] +.The `elasticsearch-setup-passwords` tool is deprecated. +[%collapsible] +==== +*Details* + +In 8.0, we're deprecating the `elasticsearch-setup-passwords` tool. To +manually reset the password for built-in users (including the `elastic` user), use +the {ref}/reset-password.html[`elasticsearch-reset-password`] tool, the {es} +{ref}/security-api-change-password.html[change passwords API], or the +User Management features in {kib}. We will remove the +`elasticsearch-setup-passwords` tool in a future release. + +*Impact* + +When starting {es} for the first time, passwords are generated automatically for +the `elastic` user. If you run the `elasticsearch-setup-passwords` tool after +starting {es}, the command will fail because the password for the `elastic` +user is already configured. +==== + .The file and native realms are now enabled unless explicitly disabled. [%collapsible] ==== @@ -19,11 +38,11 @@ Previously, the file and native realms had the following implicit behaviors: if any other realm was configured. * If no other realm was available because realms were either not configured, -not perrmitted by license, or explicitly disabled, the file and native realms +not permitted by license, or explicitly disabled, the file and native realms were enabled, even if explicitly disabled. *Impact* + -To explicilty disable the file or native realm, set the respective +To explicitly disable the file or native realm, set the respective `file..enabled` or `native..enabled` setting to `false` under the `xpack.security.authc.realms` namespace in `elasticsearch.yml`. 
@@ -32,7 +51,7 @@ The following configuration example disables the native realm and the file realm [source,yaml] ---- xpack.security.authc.realms: - + native.realm1.enabled: false file.realm2.enabled: false @@ -78,12 +97,12 @@ now rolled-over by disk size limit as well. Moreover, the rolled-over logs are also gzip compressed. *Impact* + -The names of rolled over audit logfiles (but not the name of the current log) +The names of rolled over audit log files (but not the name of the current log) have changed. -If you've setup automated tools to consume these files, you must configure them -to use the new names and to possibly account for gzip archives instead of plaintext. -The Docker build of Elasticsearch is not affected since it logs on stdout where -rollover is not performed. +If you've set up automated tools to consume these files, you must configure them +to use the new names and to possibly account for `gzip` archives instead of +plain text. The Docker build of {es} is not affected because it logs on `stdout`, +where rollover is not performed. ==== [[accept-default-password-removed]] @@ -123,7 +142,7 @@ these settings in `elasticsearch.yml` will result in an error on startup. *Details* + The `elasticsearch-migrate` tool provided a way to convert file realm users and roles into the native realm. It has been deprecated -since 7.2.0. Users and roles should now be created in the native +since {es} 7.2.0. Users and roles should now be created in the native realm directly. *Impact* + @@ -146,6 +165,37 @@ Specifying this setting in a transport profile in `elasticsearch.yml` will result in an error on startup. ==== +[discrete] +[[saml-realm-nameid-changes]] +.The `nameid_format` SAML realm setting no longer has a default value. +[%collapsible] +==== +*Details* + +In SAML, Identity Providers (IdPs) can either be explicitly configured to +release a `NameID` with a specific format, or configured to attempt to conform +with the requirements of a Service Provider (SP). The SP declares its +requirements in the `NameIDPolicy` element of a SAML Authentication Request. +In {es}, the `nameid_format` SAML realm setting controls the `NameIDPolicy` +value. + +Previously, the default value for `nameid_format` was +`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`. This setting created +authentication requests that required the IdP to release `NameID` with a +`transient` format. + +The default value has been removed, which means that by default {es} creates SAML Authentication Requests that don't put this requirement on the +IdP. If you want to retain the previous behavior, set `nameid_format` to +`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`. + +*Impact* + +If you currently don't configure `nameid_format` explicitly, it's possible +that your IdP will reject authentication requests from {es} because the requests +do not specify a `NameID` format (and your IdP is configured to expect one). +This mismatch can result in a broken SAML configuration. If you're unsure whether +your IdP is explicitly configured to use a certain `NameID` format and you want to retain current behavior, +try setting `nameid_format` to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` explicitly. +==== + [discrete] [[ssl-validation-changes]] ===== SSL/TLS configuration validation @@ -268,7 +318,7 @@ on startup.
[discrete] [[ssl-misc-changes]] -===== Other SSL/TLS changes +===== Other SSL/TLS changes .PKCS#11 keystores and truststores cannot be configured in `elasticsearch.yml` [%collapsible] ==== @@ -288,7 +338,7 @@ Use of a PKCS#11 keystore or truststore as the JRE's default store is not affect *Impact* + If you have a PKCS#11 keystore configured within your `elasticsearch.yml` file, you must remove that -configuration and switch to a supported keystore type, or configure your PKCS#11 keystore as the +configuration and switch to a supported keystore type, or configure your PKCS#11 keystore as the JRE default store. ==== @@ -341,6 +391,7 @@ renamed to better reflect its intended use. Assign users with the `kibana_user` role to the `kibana_admin` role. Discontinue use of the `kibana_user` role. ==== + // end::notable-breaking-changes[] // These are non-notable changes @@ -354,7 +405,7 @@ Discontinue use of the `kibana_user` role. [%collapsible] ==== *Details* + -If `xpack.security.fips_mode.enabled` is true (see <>), +If `xpack.security.fips_mode.enabled` is true (see <>), the value of `xpack.security.authc.password_hashing.algorithm` now defaults to `pbkdf2_stretch`. diff --git a/docs/reference/migration/migrate_8_0/transform.asciidoc b/docs/reference/migration/migrate_8_0/transform.asciidoc new file mode 100644 index 0000000000000..690b175fabf10 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/transform.asciidoc @@ -0,0 +1,21 @@ +[discrete] +[[breaking_80_transform_changes]] +==== Transform changes + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] +.{transforms-cap} created in 7.4 or earlier versions must be upgraded +[%collapsible] +==== +*Details* + +Early beta versions of {transforms} had configuration information in a format +that is no longer supported. + + +*Impact* + +Use the {ref}/upgrade-transforms.html[upgrade {transforms} API] to fix your +{transforms}. This upgrade does not affect the source or destination indices. +==== +// end::notable-breaking-changes[] diff --git a/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc b/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc index 10b298c76b327..40d9a8b28a0b8 100644 --- a/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-trained-models.asciidoc @@ -102,7 +102,7 @@ ascending order. ==== `created_by`::: (string) -Information on the creator of the trained model. +The creator of the trained model. `create_time`::: (<>) The time when the trained model was created. `default_field_map` ::: (object) -A string to string object that contains the default field map to use -when inferring against the model. For example, data frame analytics -may train the model on a specific multi-field `foo.keyword`. -The analytics job would then supply a default field map entry for -`"foo" : "foo.keyword"`. +A string-to-string object that contains the default field map to use when inferring +against the model. For example, {dfanalytics} may train the model on a specific +multi-field `foo.keyword`. The analytics job would then supply a default field +map entry for `"foo" : "foo.keyword"`. + Any field map described in the inference configuration takes precedence. @@ -133,8 +132,8 @@ The estimated number of operations to use the trained model. `inference_config`::: (object) The default configuration for inference.
This can be either a `regression`
-or `classification` configuration. It must match the underlying
-`definition.trained_model`'s `target_type`.
+or `classification` configuration. It must match the `target_type` of the
+underlying `definition.trained_model`.
+
.Properties of `inference_config`
[%collapsible%open]
diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc
index 7d0c1e72fd02b..37133c27e5319 100644
--- a/docs/reference/ml/ml-shared.asciidoc
+++ b/docs/reference/ml/ml-shared.asciidoc
@@ -897,8 +897,8 @@ end::inference-config-classification-num-top-classes[]
tag::inference-config-classification-num-top-feature-importance-values[]
Specifies the maximum number of
-{ml-docs}/ml-feature-importance.html[{feat-imp}] values per document. By
-default, it is zero and no {feat-imp} calculation occurs.
+{ml-docs}/ml-feature-importance.html[{feat-imp}] values per document. Defaults
+to `0`, which means no {feat-imp} calculation occurs.
end::inference-config-classification-num-top-feature-importance-values[]

tag::inference-config-classification-top-classes-results-field[]
@@ -908,7 +908,7 @@ end::inference-config-classification-top-classes-results-field[]
tag::inference-config-classification-prediction-field-type[]
Specifies the type of the predicted field to write.
-Acceptable values are: `string`, `number`, `boolean`. When `boolean` is provided
+Valid values are: `string`, `number`, `boolean`. When `boolean` is provided,
`1.0` is transformed to `true` and `0.0` to `false`.
end::inference-config-classification-prediction-field-type[]

@@ -921,8 +921,8 @@ BERT-style tokenization is to be performed with the enclosed settings.
end::inference-config-nlp-tokenization-bert[]

tag::inference-config-nlp-tokenization-bert-do-lower-case[]
-Should the tokenization lower case the text sequence when building
-the tokens.
+Specifies if the tokenization lower cases the text sequence when building the
+tokens.
end::inference-config-nlp-tokenization-bert-do-lower-case[]

tag::inference-config-nlp-tokenization-bert-with-special-tokens[]
@@ -935,29 +935,29 @@ Tokenize with special tokens. The tokens typically included in BERT-style tokeni
end::inference-config-nlp-tokenization-bert-with-special-tokens[]

tag::inference-config-nlp-tokenization-bert-max-sequence-length[]
-The maximum number of tokens allowed to be output by the tokenizer.
+Specifies the maximum number of tokens allowed to be output by the tokenizer.
The default for BERT-style tokenization is `512`.
end::inference-config-nlp-tokenization-bert-max-sequence-length[]

tag::inference-config-nlp-vocabulary[]
-The configuration for retreiving the model's vocabulary. The vocabulary is then
-used at inference time. This information is usually provided automatically by
-storing vocabulary in a known, internally managed index.
+The configuration for retrieving the vocabulary of the model. The vocabulary is
+then used at inference time. This information is usually provided automatically
+by storing vocabulary in a known, internally managed index.
end::inference-config-nlp-vocabulary[]

tag::inference-config-nlp-fill-mask[]
-Configuration for a fill_mask NLP task. The fill_mask task works with models
-optimized for a fill mask action. For example, for BERT models, the following
-text may be provided: "The capital of France is [MASK].". The response indicates
-the value most likely to replace `[MASK]`. In this instance, the
-most probable token is `paris`.
+Configuration for a fill_mask natural language processing (NLP) task.
The +fill_mask task works with models optimized for a fill mask action. For example, +for BERT models, the following text may be provided: "The capital of France is +[MASK].". The response indicates the value most likely to replace `[MASK]`. In +this instance, the most probable token is `paris`. end::inference-config-nlp-fill-mask[] tag::inference-config-ner[] Configures a named entity recognition (NER) task. NER is a special case of token classification. Each token in the sequence is classified according to the provided classification labels. Currently, the NER task requires the -`classification_labels` Inside-Outside-Beginning formatted labels. Only +`classification_labels` Inside-Outside-Beginning (IOB) formatted labels. Only person, organization, location, and miscellaneous are supported. end::inference-config-ner[] @@ -977,8 +977,8 @@ end::inference-config-text-classification[] tag::inference-config-text-embedding[] Text embedding takes an input sequence and transforms it into a vector of numbers. These embeddings capture not simply tokens, but semantic meanings and -context. These embeddings can then be used in a <> -field for powerful insights. +context. These embeddings can be used in a <> field +for powerful insights. end::inference-config-text-embedding[] tag::inference-config-regression-num-top-feature-importance-values[] @@ -1003,9 +1003,11 @@ Configures a zero-shot classification task. Zero-shot classification allows for text classification to occur without pre-determined labels. At inference time, it is possible to adjust the labels to classify. This makes this type of model and task exceptionally flexible. - -If consistently classifying the same labels, it may be better to use a fine turned -text classification model. ++ +-- +If consistently classifying the same labels, it may be better to use a +fine-tuned text classification model. +-- end::inference-config-zero-shot-classification[] tag::inference-config-zero-shot-classification-classification-labels[] @@ -1019,9 +1021,11 @@ end::inference-config-zero-shot-classification-classification-labels[] tag::inference-config-zero-shot-classification-hypothesis-template[] This is the template used when tokenizing the sequences for classification. - ++ +-- The labels replace the `{}` value in the text. The default value is: `This example is {}.` +-- end::inference-config-zero-shot-classification-hypothesis-template[] tag::inference-config-zero-shot-classification-labels[] @@ -1031,11 +1035,8 @@ end::inference-config-zero-shot-classification-labels[] tag::inference-config-zero-shot-classification-multi-label[] Indicates if more than one `true` label is possible given the input. - This is useful when labeling text that could pertain to more than one of the -input labels. - -Defaults to `false`. +input labels. Defaults to `false`. end::inference-config-zero-shot-classification-multi-label[] tag::inference-metadata-feature-importance-feature-name[] diff --git a/docs/reference/modules/indices/circuit_breaker.asciidoc b/docs/reference/modules/indices/circuit_breaker.asciidoc index b128ce5dfb371..161f3a8876f18 100644 --- a/docs/reference/modules/indices/circuit_breaker.asciidoc +++ b/docs/reference/modules/indices/circuit_breaker.asciidoc @@ -126,11 +126,11 @@ within a period of time. See the "prefer-parameters" section of the <> documentation for more information. 
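Assuming the dynamic cluster-settings pattern used elsewhere in these docs applies to the compilation limit described next, a minimal sketch of raising it (the `300/5m` value is purely illustrative, not a recommendation):

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "script.max_compilations_rate": "300/5m"
  }
}
----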
-`script.context.$CONTEXT.max_compilations_rate`::
+`script.max_compilations_rate`::
 (<>)
 Limit for the number of unique dynamic scripts within a certain interval
- that are allowed to be compiled for a given context. Defaults to `75/5m`,
- meaning 75 every 5 minutes.
+ that are allowed to be compiled. Defaults to `150/5m`,
+ meaning 150 every 5 minutes.

[[regex-circuit-breaker]]
[discrete]
diff --git a/docs/reference/modules/indices/recovery.asciidoc b/docs/reference/modules/indices/recovery.asciidoc
index 1993d3da5db37..970c1b56af987 100644
--- a/docs/reference/modules/indices/recovery.asciidoc
+++ b/docs/reference/modules/indices/recovery.asciidoc
@@ -100,3 +100,10 @@ sent in parallel to the target node for each recovery. Defaults to `5`.
+
Do not increase this setting without carefully verifying that your cluster has
the resources available to handle the extra load that will result.
+
+`indices.recovery.max_concurrent_snapshot_file_downloads_per_node`::
+(<>, Expert) Number of snapshot file download requests
+executed in parallel on the target node for all recoveries. Defaults to `25`.
++
+Do not increase this setting without carefully verifying that your cluster has
+the resources available to handle the extra load that will result.
diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc
index 50923d3103f6e..2626c255baf05 100644
--- a/docs/reference/modules/node.asciidoc
+++ b/docs/reference/modules/node.asciidoc
@@ -459,6 +459,19 @@ should be configured to locate the data directory outside the {es} home
directory, so that the home directory can be deleted without deleting your data!
The RPM and Debian distributions do this for you already.

+// tag::modules-node-data-path-warning-tag[]
+WARNING: Don't modify anything within the data directory or run processes that
+might interfere with its contents. If something other than {es} modifies the
+contents of the data directory, then {es} may fail, reporting corruption or
+other data inconsistencies, or may appear to work correctly having silently
+lost some of your data. Don't attempt to take filesystem backups of the data
+directory; there is no supported way to restore such a backup. Instead, use
+<> to take backups safely. Don't run virus scanners on the
+data directory. A virus scanner can prevent {es} from working correctly and may
+modify the contents of the data directory. The data directory contains no
+executables so a virus scan will only find false positives.
+// end::modules-node-data-path-warning-tag[]
+
[discrete]
[[other-node-settings]]
=== Other node settings
diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc
index 558f0f2f00583..c7242941cc671 100644
--- a/docs/reference/modules/threadpool.asciidoc
+++ b/docs/reference/modules/threadpool.asciidoc
@@ -21,6 +21,11 @@ There are several thread pools, but the important ones include:
 For count/search/suggest/get operations on `search_throttled` indices.
 Thread pool type is `fixed` with a size of `1`, and queue_size of `100`.

+`search_coordination`::
+ For lightweight search-related coordination operations. Thread pool type is
+ `fixed` with a size of `min(5, (`<>`) / 2)`, and
+ queue_size of `1000`.
+
`get`::
 For get operations.
Thread pool type is `fixed` with a size of <>, diff --git a/docs/reference/monitoring/collecting-monitoring-data.asciidoc b/docs/reference/monitoring/collecting-monitoring-data.asciidoc index b43f4b82182ec..769387b8c06e9 100644 --- a/docs/reference/monitoring/collecting-monitoring-data.asciidoc +++ b/docs/reference/monitoring/collecting-monitoring-data.asciidoc @@ -55,7 +55,10 @@ For example, use the following APIs to review and change this setting: [source,console] ---------------------------------- GET _cluster/settings +---------------------------------- +[source,console] +---------------------------------- PUT _cluster/settings { "persistent": { @@ -63,6 +66,7 @@ PUT _cluster/settings } } ---------------------------------- +// TEST[warning:[xpack.monitoring.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.] Alternatively, you can enable this setting in {kib}. In the side navigation, click *Monitoring*. If data collection is disabled, you are prompted to turn it diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index d8a5ba1b042f4..29b7bc5f38654 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -26,7 +26,10 @@ You can use the following APIs to review and change this setting: [source,console] ---------------------------------- GET _cluster/settings +---------------------------------- +[source,console] +---------------------------------- PUT _cluster/settings { "persistent": { @@ -34,6 +37,7 @@ PUT _cluster/settings } } ---------------------------------- +// TEST[warning:[xpack.monitoring.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.] If {es} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges to change them. @@ -201,6 +205,7 @@ PUT _cluster/settings } } ---------------------------------- +// TEST[warning:[xpack.monitoring.elasticsearch.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.] If {es} {security-features} are enabled, you must have `monitor` cluster privileges to view the cluster settings and `manage` cluster privileges diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 3348fe3032a5f..c4fd78701d314 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -52,6 +52,28 @@ Refer to <>. See <>. +[role="exclude",id="freeze-index-api"] +=== Freeze index API + +The freeze index API was removed in 8.0. +// tag::frozen-removal-explanation[] +Frozen indices are no longer useful due to +https://www.elastic.co/blog/significantly-decrease-your-elasticsearch-heap-memory-usage[recent +improvements in heap memory usage]. +// end::frozen-removal-explanation[] + +[role="exclude",id="ilm-freeze"] +=== Freeze {ilm-init} action + +The freeze {ilm-init} action was removed in 8.0. +include::redirects.asciidoc[tag=frozen-removal-explanation] + +[role="exclude",id="ilm-freeze-action"] +=== Freeze {ilm-init} action + +The freeze {ilm-init} action was removed in 8.0. 
+include::redirects.asciidoc[tag=frozen-removal-explanation] + [role="exclude",id="alias"] === Aliases @@ -1039,11 +1061,6 @@ See <>. See <>. -[role="exclude",id="ilm-freeze-action"] -==== Freeze action - -See <>. - [role="exclude",id="ilm-migrate-action"] ==== Migrate action @@ -1636,9 +1653,8 @@ See <>. === Frozen indices // tag::frozen-index-redirect[] -include::{es-repo-dir}/indices/apis/freeze.asciidoc[tag=freeze-api-dep] -For API documentation, see <> and <>. +For API documentation, see <>. // end::frozen-index-redirect[] [role="exclude",id="best_practices"] @@ -1663,16 +1679,16 @@ See the {glossary}/terms.html[Elastic glossary]. [role="exclude",id="multi-index"] === Multi-target syntax -See <>. +See <>. [float] [[hidden]] ==== Hidden data streams and indices -See <>. +See <>. [role="exclude",id="date-math-index-names"] -=== Date math support in system and index alias names +=== Date math support in system and index alias names See <>. diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index f886bd2fd27fa..c0fe9c9d244f8 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -67,12 +67,18 @@ end::allow-no-match-transforms2[] tag::analyzer[] `analyzer`:: (Optional, string) Analyzer to use for the query string. ++ +This parameter can only be used when the `q` query string parameter is +specified. end::analyzer[] tag::analyze_wildcard[] `analyze_wildcard`:: -(Optional, Boolean) If `true`, wildcard and prefix queries are -analyzed. Defaults to `false`. +(Optional, Boolean) If `true`, wildcard and prefix queries are analyzed. +Defaults to `false`. ++ +This parameter can only be used when the `q` query string parameter is +specified. end::analyze_wildcard[] tag::bytes[] @@ -132,6 +138,9 @@ tag::default_operator[] `default_operator`:: (Optional, string) The default operator for query string query: AND or OR. Defaults to `OR`. ++ +This parameter can only be used when the `q` query string parameter is +specified. end::default_operator[] tag::dest[] @@ -160,8 +169,11 @@ end::detailed[] tag::df[] `df`:: -(Optional, string) Field to use as default where no field prefix is -given in the query string. +(Optional, string) Field to use as default where no field prefix is given in the +query string. ++ +This parameter can only be used when the `q` query string parameter is +specified. end::df[] tag::docs-count[] @@ -530,8 +542,12 @@ end::component-template[] tag::lenient[] `lenient`:: -(Optional, Boolean) If `true`, format-based query failures (such as -providing text to a numeric field) will be ignored. Defaults to `false`. +(Optional, Boolean) If `true`, format-based query failures (such as providing +text to a numeric field) in the query string will be ignored. Defaults to +`false`. ++ +This parameter can only be used when the `q` query string parameter is +specified. end::lenient[] tag::level[] diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index f49be226c2d37..e1eaeaaedad93 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -120,12 +120,8 @@ the `multiplier` parameter without {es} recompiling the script. } ---- -For most contexts, you can compile up to 75 scripts per 5 minutes by default. -For ingest contexts, the default script compilation rate is unlimited. You -can change these settings dynamically by setting -`script.context.$CONTEXT.max_compilations_rate`. 
For example, the following
-setting limits script compilation to 100 scripts every 10 minutes for the
-{painless}/painless-field-context.html[field context]:
+You can compile up to 150 scripts per 5 minutes by default.
+For ingest contexts, the default script compilation rate is unlimited.

[source,js]
----
@@ -406,8 +402,8 @@ small.

All scripts are cached by default so that they only need to be recompiled
when updates occur. By default, scripts do not have a time-based expiration.
-You can change this behavior by using the `script.context.$CONTEXT.cache_expire` setting.
-Use the `script.context.$CONTEXT.cache_max_size` setting to configure the size of the cache.
+You can change this behavior by using the `script.cache.expire` setting.
+Use the `script.cache.max_size` setting to configure the size of the cache.

NOTE: The size of scripts is limited to 65,535 bytes. Set the value of `script.max_size_in_bytes` to increase that soft limit. If your scripts are
really large, then consider using a
diff --git a/docs/reference/search/search-vector-tile-api.asciidoc b/docs/reference/search/search-vector-tile-api.asciidoc
index 6db48698639c2..96e97c125fa18 100644
--- a/docs/reference/search/search-vector-tile-api.asciidoc
+++ b/docs/reference/search/search-vector-tile-api.asciidoc
@@ -4,7 +4,6 @@
 Vector tile search
 ++++

-experimental::[]

Searches a vector tile for geospatial values. Returns results as a binary
https://docs.mapbox.com/vector-tiles/specification[Mapbox vector tile].
diff --git a/docs/reference/search/search-your-data/long-running-searches.asciidoc b/docs/reference/search/search-your-data/long-running-searches.asciidoc
index 3cf818b7ad286..21d48df3079ad 100644
--- a/docs/reference/search/search-your-data/long-running-searches.asciidoc
+++ b/docs/reference/search/search-your-data/long-running-searches.asciidoc
@@ -5,8 +5,7 @@
{es} generally allows you to quickly search across large amounts of data. There are
situations where a search executes on many shards, possibly against
-<> and spanning multiple
-<>, for which
+large data sets or multiple <>, for which
results are not expected to be returned in milliseconds. When you need
to execute long-running searches, synchronously waiting for their results to be
returned is not ideal. Instead, async search lets
diff --git a/docs/reference/search/search-your-data/search-your-data.asciidoc b/docs/reference/search/search-your-data/search-your-data.asciidoc
index 59952312e18e5..61e61fe661943 100644
--- a/docs/reference/search/search-your-data/search-your-data.asciidoc
+++ b/docs/reference/search/search-your-data/search-your-data.asciidoc
@@ -220,8 +220,7 @@ _synchronous_ by default. The search request waits for complete results before
returning a response. However, complete results can take longer for searches
across
-<> or <>.
+large data sets or <>.

To avoid long waits, you can run an _asynchronous_, or _async_, search
instead. An <> lets you retrieve partial
diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc
index 6173727d345f1..e09e924528c7b 100644
--- a/docs/reference/search/search.asciidoc
+++ b/docs/reference/search/search.asciidoc
@@ -70,6 +70,10 @@ no partial results. Defaults to `true`.
To override the default for this field, set the
`search.default_allow_partial_results` cluster setting to `false`.
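For example, a minimal sketch of flipping that default cluster-wide, using the same persistent-settings pattern shown elsewhere in these docs:

[source,console]
----
PUT _cluster/settings
{
  "persistent": {
    "search.default_allow_partial_results": false
  }
}
----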
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=analyzer] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=analyze_wildcard] + `batched_reduce_size`:: (Optional, integer) The number of shard results that should be reduced at once on the coordinating node. This value should be used as a protection mechanism @@ -82,6 +86,10 @@ shards in the request can be large. Defaults to `512`. coordinating node and the remote clusters are minimized when executing {ccs} (CCS) requests. See <>. Defaults to `true`. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=default_operator] + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=df] + `docvalue_fields`:: (Optional, string) A comma-separated list of fields to return as the docvalue representation of a field for each hit. @@ -106,6 +114,8 @@ ignored when frozen. Defaults to `true`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=lenient] + `max_concurrent_shard_requests`:: (Optional, integer) Defines the number of concurrent shard requests per node this search executes concurrently. This value should be used to limit the @@ -274,9 +284,32 @@ stored fields in the search response. `suggest_field`:: (Optional, string) Specifies which field to use for suggestions. +`suggest_mode`:: +(Optional, string) Specifies the <>. Defaults to +`missing`. Available options: ++ +-- + +* `always` +* `missing` +* `popular` + +This parameter can only be used when the `suggest_field` and `suggest_text` +query string parameters are specified. +-- + +`suggest_size`:: +(Optional, integer) Number of <> to return. ++ +This parameter can only be used when the `suggest_field` and `suggest_text` +query string parameters are specified. + `suggest_text`:: (Optional, string) The source text for which the suggestions should be returned. ++ +This parameter can only be used when the `suggest_field` query string parameter +is specified. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=terminate_after] + diff --git a/docs/reference/setup/important-settings/path-settings.asciidoc b/docs/reference/setup/important-settings/path-settings.asciidoc index f66477c25146b..5767ba7dcd52c 100644 --- a/docs/reference/setup/important-settings/path-settings.asciidoc +++ b/docs/reference/setup/important-settings/path-settings.asciidoc @@ -17,16 +17,14 @@ In production, we strongly recommend you set the `path.data` and `path.logs` in `.msi`>> installations write data and log to locations outside of `$ES_HOME` by default. -IMPORTANT: To avoid errors, only {es} should open files in the `path.data` -directory. Exclude the `path.data` directory from other services that may open -and lock its files, such as antivirus or backup programs. 
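As a minimal sketch of the custom locations recommended above (the paths are illustrative, not defaults), `elasticsearch.yml` might contain:

[source,yaml]
----
path:
  data: /var/data/elasticsearch
  logs: /var/log/elasticsearch
----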
- Supported `path.data` and `path.logs` values vary by platform: include::{es-repo-dir}/tab-widgets/code.asciidoc[] include::{es-repo-dir}/tab-widgets/customize-data-log-path-widget.asciidoc[] +include::{es-repo-dir}/modules/node.asciidoc[tag=modules-node-data-path-warning-tag] + [discrete] ==== Multiple data paths deprecated::[7.13.0] diff --git a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc index 3e5851aa04cf8..3049ac09469d6 100644 --- a/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/create-snapshot-api.asciidoc @@ -73,11 +73,12 @@ Besides creating a copy of each data stream and index, the snapshot process can ``:: (Required, string) -Name of the repository to create a snapshot in. +Name of the snapshot repository. ``:: (Required, string) -Name of the snapshot to create. This name must be unique in the snapshot repository. +Name of the snapshot. Supports <>. Must be +unique within the snapshot repository. [[create-snapshot-api-query-params]] ==== {api-query-parms-title} @@ -96,50 +97,67 @@ initializes. Defaults to `false`. // Set an attribute so we can reuse these params with anchors :page-id: create-snapshot-api // tag::snapshot-config[] +`expand_wildcards`:: ++ +-- +(Optional, string) Determines how wildcard patterns in the `indices` parameter +match data streams and indices. Supports comma-separated values, such as +`open,hidden`. Defaults to `all`. Valid values are: + +`all`::: +Match any data stream or index, including <> ones. + +`open`::: +Match open indices and data streams. + +`closed`::: +Match closed indices and data streams. + +`hidden`::: +Match hidden data streams and indices. Must be combined with `open`, `closed`, +or both. + +`none`::: +Don't expand wildcard patterns. +-- + `ignore_unavailable`:: (Optional, Boolean) If `false`, the snapshot fails if any data stream or index in `indices` is missing or closed. If `true`, the snapshot ignores missing or closed data streams and indices. Defaults to `false`. -`indices`:: -(Optional, string) -A comma-separated list of data streams and indices to include in the snapshot. -<> is supported. -+ -By default, a snapshot includes all data streams and indices in the cluster. If this -argument is provided, the snapshot only includes the specified data streams and clusters. - `include_global_state`:: + -- (Optional, Boolean) -If `true`, the current global state is included in the snapshot. -Defaults to `true`. - -The global state includes: +If `true`, include the cluster state in the snapshot. Defaults to `true`. +The cluster state includes: -* Persistent cluster settings -* Index templates -* Legacy index templates -* Ingest pipelines -* {ilm-init} lifecycle policies -* Data stored in system indices, such as Watches and task records (configurable via `feature_states`) +include::restore-snapshot-api.asciidoc[tag=cluster-state-contents] -- +`indices`:: +(Optional, string or array of strings) +Comma-separated list of data streams and indices to include in the snapshot. +Supports <>. Defaults to an empty array +(`[]`), which includes all data streams and indices, including system indices. ++ +To exclude all data streams and indices, use `-*` or `none`. + [id="{page-id}-feature-states"] `feature_states`:: (Optional, array of strings) -A list of feature states to be included in this snapshot. 
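As a hedged sketch combining several of the parameters documented here (the repository name, snapshot name, index targets, and metadata values are all hypothetical):

[source,console]
----
PUT _snapshot/my_repository/my_snapshot
{
  "indices": "my-data-stream,my-index",
  "expand_wildcards": "all",
  "ignore_unavailable": true,
  "include_global_state": false,
  "metadata": {
    "taken_by": "ops-team"
  }
}
----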
A list of features -available for inclusion in the snapshot and their descriptions be can be -retrieved using the <>. -Each feature state includes one or more system indices containing data necessary -for the function of that feature. Providing an empty array will include no feature -states in the snapshot, regardless of the value of `include_global_state`. +Feature states to include in the snapshot. To get a list of possible feature +state values and their descriptions, use the <>. Each feature state includes one or more system indices. ++ +If `include_global_state` is `true`, the snapshot includes all feature states by +default. If `include_global_state` is `false`, the snapshot includes no feature +states by default. + -By default, all available feature states will be included in the snapshot if -`include_global_state` is `true`, or no feature states if `include_global_state` -is `false`. +To exclude all feature states, regardless of the `include_global_state` value, +specify an empty array (`[]`) or `none`. `metadata`:: (Optional, object) diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc index 377b4b2321b1a..d50dab75e3c6e 100644 --- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc @@ -135,18 +135,19 @@ indices. If `false`, the request doesn't restore aliases. Defaults to `true`. + -- (Optional, Boolean) -If `false`, the global state is not restored. Defaults to `false`. +If `true`, restore the cluster state. Defaults to `false`. -If `true`, the current global state is included in the restore operation. +The cluster state includes: -The global state includes: - -* Persistent cluster settings -* Index templates -* Legacy index templates -* Ingest pipelines -* {ilm-init} lifecycle policies -* For snapshots taken after 7.12.0, data stored in system indices, such as Watches and task records, replacing any existing configuration (configurable via `feature_states`) +// tag::cluster-state-contents[] +* <> +* <> +* <> +* <> +* <> +* For snapshots taken after 7.12.0, data stored in system indices, such as + Watches and task records. +// end::cluster-state-contents[] If `include_global_state` is `true` then the restore operation merges the legacy index templates in your cluster with the templates contained in the @@ -154,6 +155,9 @@ snapshot, replacing any existing ones whose name matches one in the snapshot. It completely removes all persistent settings, non-legacy index templates, ingest pipelines and {ilm-init} lifecycle policies that exist in your cluster and replaces them with the corresponding items from the snapshot. + +You can use the `feature_states` parameter to configure how system indices +are restored from the cluster state. -- [[restore-snapshot-api-feature-states]] diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index eda412f0084e7..aedd96ff4c6b7 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -277,6 +277,7 @@ PUT _cluster/settings } } ---- +// TEST[warning:[xpack.monitoring.collection.enabled] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version.] 
// TEST[continued] //// @@ -310,7 +311,7 @@ For example, the following command creates a user named `restore_user`. Use this file realm user to authenticate requests until the restore operation is complete. -. Use the <> to set +. Use the <> to set <> to `false`. This lets you delete indices and data streams using wildcards. + diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 7b0e74cd4951a..04ab36f8839ef 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -88,20 +88,20 @@ Timezone used by the driver _per connection_ indicated by its `ID`. [discrete] ===== Network -`connect.timeout` (default 30s):: -Connection timeout (in seconds). That is the maximum amount of time waiting to make a connection to the server. +`connect.timeout` (default `30000`):: +Connection timeout (in milliseconds). That is the maximum amount of time waiting to make a connection to the server. -`network.timeout` (default 60s):: -Network timeout (in seconds). That is the maximum amount of time waiting for the network. +`network.timeout` (default `60000`):: +Network timeout (in milliseconds). That is the maximum amount of time waiting for the network. -`page.timeout` (default 45s):: -Page timeout (in seconds). That is the maximum amount of time waiting for a page. +`page.timeout` (default `45000`):: +Page timeout (in milliseconds). That is the maximum amount of time waiting for a page. -`page.size` (default 1000):: +`page.size` (default `1000`):: Page size (in entries). The number of results returned per page by the server. -`query.timeout` (default 90s):: -Query timeout (in seconds). That is the maximum amount of time waiting for a query to return. +`query.timeout` (default `90000`):: +Query timeout (in milliseconds). That is the maximum amount of time waiting for a query to return. [[jdbc-cfg-auth]] [discrete] @@ -115,7 +115,7 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que [discrete] ==== SSL -`ssl` (default false):: Enable SSL +`ssl` (default `false`):: Enable SSL `ssl.keystore.location`:: key store (if used) location @@ -145,12 +145,12 @@ will be - typically the first in natural ascending order) for fields with multip [discrete] ==== Index -`index.include.frozen` (default `false`):: Whether to include <> in the query execution or not (default). +`index.include.frozen` (default `false`):: Whether to include frozen indices in the query execution or not (default). [discrete] ==== Additional -`validate.properties` (default true):: If disabled, it will ignore any misspellings or unrecognizable properties. When enabled, an exception +`validate.properties` (default `true`):: If disabled, it will ignore any misspellings or unrecognizable properties. When enabled, an exception will be thrown if the provided property cannot be recognized. diff --git a/docs/reference/sql/language/indices.asciidoc b/docs/reference/sql/language/indices.asciidoc index 6180f5b1c07ca..8887733325a51 100644 --- a/docs/reference/sql/language/indices.asciidoc +++ b/docs/reference/sql/language/indices.asciidoc @@ -49,7 +49,7 @@ Using `SHOW TABLES` command again: include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesLikeWildcard] ---- -The pattern matches all tables that start with `emp`. +The pattern matches all tables that start with `emp`. This command supports _escaping_ as well, for example: @@ -88,7 +88,7 @@ requires the keyword `LIKE` for SQL `LIKE` pattern. 
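Stepping back to the JDBC properties above, a minimal, hedged Java sketch that wires the millisecond timeouts into a connection (the URL points at a hypothetical local cluster):

[source,java]
----
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class EsJdbcTimeoutsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // All timeouts below are in milliseconds, not seconds
        props.setProperty("connect.timeout", "30000");
        props.setProperty("network.timeout", "60000");
        props.setProperty("query.timeout", "90000");
        // Page size is in entries, not a timeout
        props.setProperty("page.size", "1000");
        try (Connection con = DriverManager.getConnection("jdbc:es://localhost:9200", props)) {
            System.out.println("connected: " + con.isValid(5));
        }
    }
}
----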
[[sql-index-frozen]]
=== Frozen Indices

-By default, {es-sql} doesn't search <>. To
+By default, {es-sql} doesn't search <>. To
search frozen indices, use one of the following features:

dedicated configuration parameter::
@@ -108,4 +108,4 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[showTablesIncludeFrozen]
include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableIncludeFrozen]
----

-Unless enabled, frozen indices are completely ignored; it is as if they do not exist and as such, queries ran against them are likely to fail.
\ No newline at end of file
+Unless enabled, frozen indices are completely ignored; it is as if they do not exist and as such, queries run against them are likely to fail.
diff --git a/docs/reference/transform/apis/index.asciidoc b/docs/reference/transform/apis/index.asciidoc
index b0948a7adb3dc..34a0b92c2d344 100644
--- a/docs/reference/transform/apis/index.asciidoc
+++ b/docs/reference/transform/apis/index.asciidoc
@@ -13,5 +13,6 @@ include::preview-transform.asciidoc[leveloffset=+2]
include::start-transform.asciidoc[leveloffset=+2]
//STOP
include::stop-transform.asciidoc[leveloffset=+2]
-//UPDATE
-include::update-transform.asciidoc[leveloffset=+2]
\ No newline at end of file
+//UPDATE-UPGRADE
+include::update-transform.asciidoc[leveloffset=+2]
+include::upgrade-transforms.asciidoc[leveloffset=+2]
\ No newline at end of file
diff --git a/docs/reference/transform/apis/transform-apis.asciidoc b/docs/reference/transform/apis/transform-apis.asciidoc
index b44c5f4970b74..a6bd6fc261770 100644
--- a/docs/reference/transform/apis/transform-apis.asciidoc
+++ b/docs/reference/transform/apis/transform-apis.asciidoc
@@ -11,3 +11,4 @@
* <>
* <>
* <>
+* <>
diff --git a/docs/reference/transform/apis/upgrade-transforms.asciidoc b/docs/reference/transform/apis/upgrade-transforms.asciidoc
index 7c95a31e63243..8daf3768466d2 100644
--- a/docs/reference/transform/apis/upgrade-transforms.asciidoc
+++ b/docs/reference/transform/apis/upgrade-transforms.asciidoc
@@ -1,14 +1,14 @@
[role="xpack"]
[testenv="basic"]
[[upgrade-transforms]]
-= Upgrade {transform} API
+= Upgrade {transforms} API

[subs="attributes"]
++++
-Upgrade {transform}
+Upgrade {transforms}
++++

-Upgrades all {transform}s.
+Upgrades all {transforms}.

[[upgrade-transforms-request]]
== {api-request-title}
@@ -22,36 +22,82 @@ Requires the following privileges:

* cluster: `manage_transform` (the `transform_admin` built-in role grants this
 privilege)
-* source indices: `read`, `view_index_metadata`
-* destination index: `read`, `index`.


[[upgrade-transforms-desc]]
== {api-description-title}

-This API upgrades all existing {transform}s.
+{transforms-cap} are compatible across minor versions and between supported
+major versions. However, over time, the format of {transform} configuration
+information may change. This API identifies {transforms} that have a legacy
+configuration format and upgrades them to the latest version, including cleanup
+of the internal data structures that store {transform} state and checkpoints.
+{transform-cap} upgrade does not affect the source or destination indices.
+
+If a {transform} upgrade step fails, the upgrade stops, and an error is returned
+about the underlying issue. Resolve the issue, then re-run the process. A
+summary is returned when the upgrade is finished.
+
+To ensure {ctransforms} remain running during a major version upgrade of the
+cluster (for example, from 7.16 to 8.0), it is recommended to upgrade
+{transforms} before upgrading the cluster.
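Before running the real upgrade, you can check what would change using the `dry_run` query parameter described below; a minimal sketch:

[source,console]
----
POST _transform/_upgrade?dry_run=true
----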
You may also want to back up your
+cluster prior to the upgrade.
+
+
+[IMPORTANT]
+====
+
+* When {es} {security-features} are enabled, your {transform} remembers the
+roles of the user who created or updated it last. In contrast to
+<>, a {transform} upgrade does not change the
+stored roles; therefore, the role used to read source data and write to the
+destination index remains unchanged.
+
+====
+

[[upgrade-transforms-query-parms]]
== {api-query-parms-title}

`dry_run`::
- (Optional, Boolean) When `true`, only checks for updates but does not execute them.
+ (Optional, Boolean) When `true`, only checks for updates but does not execute
+ them. Defaults to `false`.
+
+
+[[upgrade-transforms-response-body]]
+== {api-response-body-title}
+
+`needs_update`::
+ (integer) The number of {transforms} that need to be upgraded.
+
+`no_action`::
+ (integer) The number of {transforms} that don't require upgrading.
+
+`updated`::
+ (integer) The number of {transforms} that have been upgraded.
+
+
[[upgrade-transforms-example]]
== {api-examples-title}

+To upgrade the legacy {transforms} to the latest configuration format, perform
+the following API call:
+
[source,console]
--------------------------------------------------
POST _transform/_upgrade
--------------------------------------------------
// TEST[setup:simple_kibana_continuous_pivot]

-When all {transform}s are upgraded, you receive a summary:
+When all {transforms} are upgraded, you receive a summary:

[source,console-result]
----
{
+ "needs_update": 0,
+ "updated": 2,
 "no_action": 1
}
----
-// TESTRESPONSE[s/"no_action" : 1/"no_action" : $body.no_action/]
+// TESTRESPONSE[skip:TBD]
diff --git a/docs/reference/transform/ecommerce-tutorial.asciidoc b/docs/reference/transform/ecommerce-tutorial.asciidoc
index 2e74dbeacfc9d..9c121a0f87d3f 100644
--- a/docs/reference/transform/ecommerce-tutorial.asciidoc
+++ b/docs/reference/transform/ecommerce-tutorial.asciidoc
@@ -241,6 +241,11 @@ destination index. In {kib}, if you copied the API request to your clipboard,
paste it into the console, then refer to the `generated_dest_index` object in
the API response.

+NOTE: {transforms-cap} might have more configuration options provided by the
+APIs than the options available in {kib}. For example, you can set an ingest
+pipeline for `dest` by calling the <>. For all the {transform}
+configuration options, refer to the <>.
+
.API example
[%collapsible]
====
diff --git a/docs/reference/transform/limitations.asciidoc b/docs/reference/transform/limitations.asciidoc
index e4df33a311fa5..0e813a81f39da 100644
--- a/docs/reference/transform/limitations.asciidoc
+++ b/docs/reference/transform/limitations.asciidoc
@@ -276,4 +276,12 @@ that belong to your space. However, this limited scope does not apply to
[[transform-kibana-limitations]]
=== Up to 1,000 {transforms} are listed in {kib}

-The {transforms} management page in {kib} lists up to 1000 {transforms}.
\ No newline at end of file
+The {transforms} management page in {kib} lists up to 1000 {transforms}.
+
+[discrete]
+[[transform-ui-support]]
+=== {kib} might not support every {transform} configuration option
+
+There might be configuration options available via the {transform} APIs that are
+not supported in {kib}. For an exhaustive list of configuration options, refer
+to the <>.
\ No newline at end of file
diff --git a/docs/reference/transform/overview.asciidoc b/docs/reference/transform/overview.asciidoc
index 78e24b071c40f..f2d177ceab544 100644
--- a/docs/reference/transform/overview.asciidoc
+++ b/docs/reference/transform/overview.asciidoc
@@ -8,8 +8,14 @@ You can choose either of the following methods to transform your data:
<> or <>.

-IMPORTANT: All {transforms} leave your source index intact. They create a new
-index that is dedicated to the transformed data.
+[IMPORTANT]
+====
+* All {transforms} leave your source index intact. They create a new
+ index that is dedicated to the transformed data.
+* {transforms-cap} might have more configuration options provided by the APIs
+ than the options available in {kib}. For all the {transform} configuration
+ options, refer to the <>.
+====

{transforms-cap} are persistent tasks; they are stored in cluster state which
makes them resilient to node failures. Refer to <> and
diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc
index 82d8dd43bd0d6..b05d4fd8d4733 100644
--- a/docs/reference/upgrade.asciidoc
+++ b/docs/reference/upgrade.asciidoc
@@ -55,19 +55,19 @@ endif::[]

|7.0–7.15
a|
-. https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version}
+. {ref-7x}/rolling-upgrades.html[Rolling upgrade] to 7.16
. <> to {version}

|6.8
a|
-. https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version}
+. {ref-7x}/rolling-upgrades.html[Rolling upgrade] to 7.16
. <> to {version}

|6.0–6.7
a|
. https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html[Rolling upgrade] to 6.8
-. https://www.elastic.co/guide/en/elasticsearch/reference/{prev-major-version}/rolling-upgrades.html[Rolling upgrade] to {prev-major-version}
+. {ref-7x}/rolling-upgrades.html[Rolling upgrade] to 7.16
. <> to {version}

|====
diff --git a/docs/reference/upgrade/archived-settings.asciidoc b/docs/reference/upgrade/archived-settings.asciidoc
index d1b65b27e4cb3..dbc88a9d6811e 100644
--- a/docs/reference/upgrade/archived-settings.asciidoc
+++ b/docs/reference/upgrade/archived-settings.asciidoc
@@ -22,7 +22,7 @@ GET _cluster/settings?flat_settings=true&filter_path=persistent.archived*
----

You can remove archived cluster settings using the
-<>.
+<>.

[source,console]
----
diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc
index 5ab541e5bb45a..d7305706e9d99 100644
--- a/docs/reference/upgrade/rolling_upgrade.asciidoc
+++ b/docs/reference/upgrade/rolling_upgrade.asciidoc
@@ -13,12 +13,16 @@ into the following two groups and upgrade the groups in this order:
. Nodes that are not <>. You can retrieve a
list of these nodes with `GET /_nodes/_all,master:false` or by finding all the nodes
configured with `node.master: false`.
+.. If you are using data tiers, upgrade the nodes tier by tier, completing one
+tier at a time, in this order: frozen, cold, warm, and finally hot. This
+ensures ILM can continue to move indices through the lifecycle phases and
+preserves version compatibility.
+.. If you are not using data tiers, you may upgrade the nodes within the group in any order.
+
. Master-eligible nodes, which are the remaining nodes. You can retrieve a list
of these nodes with `GET /_nodes/master:true`.

-You may upgrade the nodes within each of these groups in any order.
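To enumerate the two upgrade groups, the node queries quoted in the text can be run directly; shown together here as a sketch:

[source,console]
----
GET /_nodes/_all,master:false

GET /_nodes/master:true
----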
- Upgrading the nodes in this order ensures that the master-ineligible nodes are always running a version at least as new as the master-eligible nodes. Newer nodes can always join a cluster with an older master, but older nodes cannot diff --git a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java index e9a9be78f311a..d4020b0a7ef4e 100644 --- a/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java @@ -8,9 +8,9 @@ package org.elasticsearch.nio; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.test.ESTestCase; import org.junit.Before; import org.mockito.ArgumentCaptor; @@ -27,8 +27,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.isNull; import static org.mockito.Matchers.same; import static org.mockito.Mockito.doAnswer; @@ -168,7 +168,7 @@ public void testSelectorTimeoutWillBeReducedIfTaskSooner() throws Exception { public void testSelectorClosedExceptionIsNotCaughtWhileRunning() throws IOException { boolean closedSelectorExceptionCaught = false; - when(rawSelector.select(anyInt())).thenThrow(new ClosedSelectorException()); + when(rawSelector.select(anyLong())).thenThrow(new ClosedSelectorException()); try { this.selector.singleLoop(); } catch (ClosedSelectorException e) { @@ -181,7 +181,7 @@ public void testSelectorClosedExceptionIsNotCaughtWhileRunning() throws IOExcept public void testIOExceptionWhileSelect() throws IOException { IOException ioException = new IOException(); - when(rawSelector.select(anyInt())).thenThrow(ioException); + when(rawSelector.select(anyLong())).thenThrow(ioException); this.selector.singleLoop(); diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java index a188636b1c9fa..7b6f8aa5480bc 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/DerParser.java @@ -36,21 +36,22 @@ public final class DerParser { private static final int CONSTRUCTED = 0x20; // Tag and data types - private static final int INTEGER = 0x02; - private static final int OCTET_STRING = 0x04; - private static final int OBJECT_OID = 0x06; - private static final int NUMERIC_STRING = 0x12; - private static final int PRINTABLE_STRING = 0x13; - private static final int VIDEOTEX_STRING = 0x15; - private static final int IA5_STRING = 0x16; - private static final int GRAPHIC_STRING = 0x19; - private static final int ISO646_STRING = 0x1A; - private static final int GENERAL_STRING = 0x1B; - - private static final int UTF8_STRING = 0x0C; - private static final int UNIVERSAL_STRING = 0x1C; - private static final int BMP_STRING = 0x1E; - + static final class Type { + static final int INTEGER = 0x02; + static final int OCTET_STRING = 0x04; + static final int OBJECT_OID = 0x06; + static final int SEQUENCE = 0x10; + static final int NUMERIC_STRING = 0x12; + static final int PRINTABLE_STRING = 0x13; + static final int VIDEOTEX_STRING = 
0x15; + static final int IA5_STRING = 0x16; + static final int GRAPHIC_STRING = 0x19; + static final int ISO646_STRING = 0x1A; + static final int GENERAL_STRING = 0x1B; + static final int UTF8_STRING = 0x0C; + static final int UNIVERSAL_STRING = 0x1C; + static final int BMP_STRING = 0x1E; + } private InputStream derInputStream; private int maxAsnObjectLength; @@ -60,6 +61,22 @@ public DerParser(byte[] bytes) { this.maxAsnObjectLength = bytes.length; } + /** + * Read an object and verify its type + * @param requiredType The expected type code + * @throws IOException if data can not be parsed + * @throws IllegalStateException if the parsed object is of the wrong type + */ + public Asn1Object readAsn1Object(int requiredType) throws IOException { + final Asn1Object obj = readAsn1Object(); + if (obj.type != requiredType) { + throw new IllegalStateException( + "Expected ASN.1 object of type 0x" + Integer.toHexString(requiredType) + " but was 0x" + Integer.toHexString(obj.type) + ); + } + return obj; + } + public Asn1Object readAsn1Object() throws IOException { int tag = derInputStream.read(); if (tag == -1) { @@ -207,7 +224,7 @@ public DerParser getParser() throws IOException { * @return BigInteger */ public BigInteger getInteger() throws IOException { - if (type != DerParser.INTEGER) + if (type != Type.INTEGER) throw new IOException("Invalid DER: object is not integer"); //$NON-NLS-1$ return new BigInteger(value); @@ -218,28 +235,28 @@ public String getString() throws IOException { String encoding; switch (type) { - case DerParser.OCTET_STRING: + case Type.OCTET_STRING: // octet string is basically a byte array return toHexString(value); - case DerParser.NUMERIC_STRING: - case DerParser.PRINTABLE_STRING: - case DerParser.VIDEOTEX_STRING: - case DerParser.IA5_STRING: - case DerParser.GRAPHIC_STRING: - case DerParser.ISO646_STRING: - case DerParser.GENERAL_STRING: + case Type.NUMERIC_STRING: + case Type.PRINTABLE_STRING: + case Type.VIDEOTEX_STRING: + case Type.IA5_STRING: + case Type.GRAPHIC_STRING: + case Type.ISO646_STRING: + case Type.GENERAL_STRING: encoding = "ISO-8859-1"; //$NON-NLS-1$ break; - case DerParser.BMP_STRING: + case Type.BMP_STRING: encoding = "UTF-16BE"; //$NON-NLS-1$ break; - case DerParser.UTF8_STRING: + case Type.UTF8_STRING: encoding = "UTF-8"; //$NON-NLS-1$ break; - case DerParser.UNIVERSAL_STRING: + case Type.UNIVERSAL_STRING: throw new IOException("Invalid DER: can't handle UCS-4 string"); //$NON-NLS-1$ default: @@ -251,7 +268,7 @@ public String getString() throws IOException { public String getOid() throws IOException { - if (type != DerParser.OBJECT_OID) { + if (type != Type.OBJECT_OID) { throw new IOException("Ivalid DER: object is not object OID"); } StringBuilder sb = new StringBuilder(64); diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java index d56459746b9cf..151bb6331cf9a 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/PemUtils.java @@ -25,6 +25,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.AccessControlException; +import java.security.AlgorithmParameters; import java.security.GeneralSecurityException; import java.security.KeyFactory; import java.security.KeyPairGenerator; @@ -68,6 +69,9 @@ public final class PemUtils { private static final String OPENSSL_EC_PARAMS_FOOTER = "-----END EC PARAMETERS-----"; private static 
final String HEADER = "-----BEGIN"; + private static final String PBES2_OID = "1.2.840.113549.1.5.13"; + private static final String AES_OID = "2.16.840.1.101.3.4.1"; + private PemUtils() { throw new IllegalStateException("Utility class should not be instantiated"); } @@ -365,10 +369,14 @@ private static PrivateKey parsePKCS8Encrypted(BufferedReader bReader, char[] key } byte[] keyBytes = Base64.getDecoder().decode(sb.toString()); - EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = new EncryptedPrivateKeyInfo(keyBytes); - SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(encryptedPrivateKeyInfo.getAlgName()); + final EncryptedPrivateKeyInfo encryptedPrivateKeyInfo = getEncryptedPrivateKeyInfo(keyBytes); + String algorithm = encryptedPrivateKeyInfo.getAlgName(); + if (algorithm.equals("PBES2") || algorithm.equals("1.2.840.113549.1.5.13")) { + algorithm = getPBES2Algorithm(encryptedPrivateKeyInfo); + } + SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(algorithm); SecretKey secretKey = secretKeyFactory.generateSecret(new PBEKeySpec(keyPassword)); - Cipher cipher = Cipher.getInstance(encryptedPrivateKeyInfo.getAlgName()); + Cipher cipher = Cipher.getInstance(algorithm); cipher.init(Cipher.DECRYPT_MODE, secretKey, encryptedPrivateKeyInfo.getAlgParameters()); PKCS8EncodedKeySpec keySpec = encryptedPrivateKeyInfo.getKeySpec(cipher); String keyAlgo = getKeyAlgorithmIdentifier(keySpec.getEncoded()); @@ -376,6 +384,55 @@ private static PrivateKey parsePKCS8Encrypted(BufferedReader bReader, char[] key return keyFactory.generatePrivate(keySpec); } + private static EncryptedPrivateKeyInfo getEncryptedPrivateKeyInfo(byte[] keyBytes) throws IOException, GeneralSecurityException { + try { + return new EncryptedPrivateKeyInfo(keyBytes); + } catch (IOException e) { + // The Sun JCE provider can't handle non-AES PBES2 data (but it can handle PBES1 DES data - go figure) + // It's not worth our effort to try and decrypt it ourselves, but we can detect it and give a good error message + DerParser parser = new DerParser(keyBytes); + final DerParser.Asn1Object rootSeq = parser.readAsn1Object(DerParser.Type.SEQUENCE); + parser = rootSeq.getParser(); + final DerParser.Asn1Object algSeq = parser.readAsn1Object(DerParser.Type.SEQUENCE); + parser = algSeq.getParser(); + final String algId = parser.readAsn1Object(DerParser.Type.OBJECT_OID).getOid(); + if (PBES2_OID.equals(algId)) { + final DerParser.Asn1Object algData = parser.readAsn1Object(DerParser.Type.SEQUENCE); + parser = algData.getParser(); + final DerParser.Asn1Object ignoreKdf = parser.readAsn1Object(DerParser.Type.SEQUENCE); + final DerParser.Asn1Object cryptSeq = parser.readAsn1Object(DerParser.Type.SEQUENCE); + parser = cryptSeq.getParser(); + final String encryptionId = parser.readAsn1Object(DerParser.Type.OBJECT_OID).getOid(); + if (encryptionId.startsWith(AES_OID) == false) { + final String name = getAlgorithmNameFromOid(encryptionId); + throw new GeneralSecurityException( + "PKCS#8 Private Key is encrypted with unsupported PBES2 algorithm [" + + encryptionId + + "]" + + (name == null ? 
"" : " (" + name + ")"), + e + ); + } + } + throw e; + } + } + + /** + * This is horrible, but it's the only option other than to parse the encoded ASN.1 value ourselves + * @see AlgorithmParameters#toString() and com.sun.crypto.provider.PBES2Parameters#toString() + */ + private static String getPBES2Algorithm(EncryptedPrivateKeyInfo encryptedPrivateKeyInfo) { + final AlgorithmParameters algParameters = encryptedPrivateKeyInfo.getAlgParameters(); + if (algParameters != null) { + return algParameters.toString(); + } else { + // AlgorithmParameters can be null when running on BCFIPS. + // However, since BCFIPS doesn't support any PBE specs, nothing we do here would work, so we just do enough to avoid an NPE + return encryptedPrivateKeyInfo.getAlgName(); + } + } + /** * Decrypts the password protected contents using the algorithm and IV that is specified in the PEM Headers of the file * @@ -604,7 +661,7 @@ private static String getKeyAlgorithmIdentifier(byte[] keyBytes) throws IOExcept return "EC"; } throw new GeneralSecurityException("Error parsing key algorithm identifier. Algorithm with OID [" + oidString + - "] is not żsupported"); + "] is not supported"); } public static List readCertificates(Collection certPaths) throws CertificateException, IOException { @@ -622,6 +679,56 @@ public static List readCertificates(Collection certPaths) thr return certificates; } + private static String getAlgorithmNameFromOid(String oidString) throws GeneralSecurityException { + switch (oidString) { + case "1.2.840.10040.4.1": + return "DSA"; + case "1.2.840.113549.1.1.1": + return "RSA"; + case "1.2.840.10045.2.1": + return "EC"; + case "1.3.14.3.2.7": + return "DES-CBC"; + case "2.16.840.1.101.3.4.1.1": + return "AES-128_ECB"; + case "2.16.840.1.101.3.4.1.2": + return "AES-128_CBC"; + case "2.16.840.1.101.3.4.1.3": + return "AES-128_OFB"; + case "2.16.840.1.101.3.4.1.4": + return "AES-128_CFB"; + case "2.16.840.1.101.3.4.1.6": + return "AES-128_GCM"; + case "2.16.840.1.101.3.4.1.21": + return "AES-192_ECB"; + case "2.16.840.1.101.3.4.1.22": + return "AES-192_CBC"; + case "2.16.840.1.101.3.4.1.23": + return "AES-192_OFB"; + case "2.16.840.1.101.3.4.1.24": + return "AES-192_CFB"; + case "2.16.840.1.101.3.4.1.26": + return "AES-192_GCM"; + case "2.16.840.1.101.3.4.1.41": + return "AES-256_ECB"; + case "2.16.840.1.101.3.4.1.42": + return "AES-256_CBC"; + case "2.16.840.1.101.3.4.1.43": + return "AES-256_OFB"; + case "2.16.840.1.101.3.4.1.44": + return "AES-256_CFB"; + case "2.16.840.1.101.3.4.1.46": + return "AES-256_GCM"; + case "2.16.840.1.101.3.4.1.5": + return "AESWrap-128"; + case "2.16.840.1.101.3.4.1.25": + return "AESWrap-192"; + case "2.16.840.1.101.3.4.1.45": + return "AESWrap-256"; + } + return null; + } + private static String getEcCurveNameFromOid(String oidString) throws GeneralSecurityException { switch (oidString) { // see https://tools.ietf.org/html/rfc5480#section-2.1.1.1 diff --git a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java index 72f456daaa557..85d468def0d5a 100644 --- a/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java +++ b/libs/ssl-config/src/test/java/org/elasticsearch/common/ssl/PemUtilsTests.java @@ -15,6 +15,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.security.AlgorithmParameters; +import java.security.GeneralSecurityException; import java.security.Key; import java.security.KeyStore; import 
java.security.PrivateKey; @@ -26,6 +27,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; import static org.hamcrest.core.StringContains.containsString; public class PemUtilsTests extends ESTestCase { @@ -79,17 +81,49 @@ public void testReadPKCS8EcKey() throws Exception { assertThat(privateKey, equalTo(key)); } - public void testReadEncryptedPKCS8Key() throws Exception { + public void testReadEncryptedPKCS8PBES1Key() throws Exception { assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath - ("/certs/pem-utils/key_pkcs8_encrypted.pem"), TESTNODE_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey( + getDataPath("/certs/pem-utils/key_pkcs8_encrypted_pbes1_des.pem"), + TESTNODE_PASSWORD + ); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); } + public void testReadEncryptedPKCS8PBES2AESKey() throws Exception { + assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); + Key key = getKeyFromKeystore("RSA"); + assertThat(key, notNullValue()); + assertThat(key, instanceOf(PrivateKey.class)); + PrivateKey privateKey = PemUtils.parsePrivateKey( + getDataPath("/certs/pem-utils/key_pkcs8_encrypted_pbes2_aes.pem"), + TESTNODE_PASSWORD + ); + assertThat(privateKey, notNullValue()); + assertThat(privateKey, equalTo(key)); + } + + public void testReadEncryptedPKCS8PBES2DESKey() throws Exception { + assumeFalse("Can't run in a FIPS JVM, PBE KeySpec is not available", inFipsJvm()); + + // Sun JCE cannot read keys encrypted with PBES2 DES (but does support AES with PBES2 and DES with PBES1) + // Rather than add our own support for this we just verify that the error message is clear and meaningful + final GeneralSecurityException exception = expectThrows( + GeneralSecurityException.class, + () -> PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/key_pkcs8_encrypted_pbes2_des.pem"), TESTNODE_PASSWORD) + ); + assertThat( + exception.getMessage(), + equalTo("PKCS#8 Private Key is encrypted with unsupported PBES2 algorithm [1.3.14.3.2.7] (DES-CBC)") + ); + assertThat(exception.getCause(), instanceOf(IOException.class)); + assertThat(exception.getCause().getMessage(), startsWith("PBE parameter parsing error")); + } + public void testReadDESEncryptedPKCS1Key() throws Exception { Key key = getKeyFromKeystore("RSA"); assertThat(key, notNullValue()); @@ -134,8 +168,10 @@ public void testReadOpenSslDsaKeyWithParams() throws Exception { Key key = getKeyFromKeystore("DSA"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/dsa_key_openssl_plain_with_params.pem"), - EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey( + getDataPath("/certs/pem-utils/dsa_key_openssl_plain_with_params.pem"), + EMPTY_PASSWORD + ); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); @@ -165,8 +201,10 @@ public void testReadOpenSslEcKeyWithParams() throws Exception { Key key = getKeyFromKeystore("EC"); assertThat(key, notNullValue()); assertThat(key, instanceOf(PrivateKey.class)); - PrivateKey privateKey = 
PemUtils.parsePrivateKey(getDataPath("/certs/pem-utils/ec_key_openssl_plain_with_params.pem"), - EMPTY_PASSWORD); + PrivateKey privateKey = PemUtils.parsePrivateKey( + getDataPath("/certs/pem-utils/ec_key_openssl_plain_with_params.pem"), + EMPTY_PASSWORD + ); assertThat(privateKey, notNullValue()); assertThat(privateKey, equalTo(key)); diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc b/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc index f7fbd2ad69c0d..7ed9e4c6c4c50 100644 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc @@ -101,12 +101,34 @@ openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outfo ---- - Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded `testnode.pem` +The `-v1 PBE-MD5-DES` parameter forces pbeWithMD5AndDES-CBC as the encryption scheme (which is also the default, but that could change with different OpenSSL versions) +See: http://oid-info.com/get/1.2.840.113549.1.5.3 +See: https://datatracker.ietf.org/doc/html/rfc8018#appendix-A.3 +[source,shell] +----- +openssl pkcs8 -topk8 -v1 PBE-MD5-DES -inform PEM -outform PEM -in testnode.pem -passin "pass:testnode" -out key_pkcs8_encrypted_pbes1_des.pem -passout "pass:testnode" +----- + +Create `PKCS#8` `PBES2` `AES` encrypted key from the encrypted `PKCS#1` encoded `testnode.pem` +The `-v2 aes256` parameter forces PBES2 with AES256 +See: http://oid-info.com/get/1.2.840.113549.1.5.13 +See: https://datatracker.ietf.org/doc/html/rfc8018#appendix-A.4 [source,shell] ----- -openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem +openssl pkcs8 -topk8 -v2 aes256 -inform PEM -outform PEM -in testnode.pem -passin "pass:testnode" -out key_pkcs8_encrypted_pbes2_aes.pem -passout "pass:testnode" ----- + +Create `PKCS#8` `PBES2` `DES` encrypted key from the encrypted `PKCS#1` encoded `testnode.pem` +The `-v2 des` parameter forces PBES2 with DES +See: http://oid-info.com/get/1.2.840.113549.1.5.13 +See: https://datatracker.ietf.org/doc/html/rfc8018#appendix-A.4 +[source,shell] +----- +openssl pkcs8 -topk8 -v2 des -inform PEM -outform PEM -in testnode.pem -passin "pass:testnode" -out key_pkcs8_encrypted_pbes2_des.pem -passout "pass:testnode" +----- + + [source,shell] ----- ssh-keygen -t ed25519 -f key_unsupported.pem diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted.pem deleted file mode 100644 index 28059d5a2266d..0000000000000 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN ENCRYPTED PRIVATE KEY----- -MIIE6TAbBgkqhkiG9w0BBQMwDgQI2jwlFL0XId0CAggABIIEyMujZbpG6zKb2pVu -soamTaoLcZwNofS9ncGIEH1nbI8UpPY81VeOIBm4mneDt8RU5bIOXP4IZEZY9uU+ -pugKQ3hT8vBQjJujjuctUPaFxB0kGEeITOInY2jn2BFDbUgy5Z7EVD4G2K06SDDK -oD+twbzZo9x34VizwpHHb8wE+DFyYc+sp+Re2Qk3FReKgjdJezfcRHbKrrlx2rJ+ -k/YAPmzcFYVbuUiB6HY6BGzSJO1JxT8iNJE+Hmk3ZLXG590hp0vuGSkY/ihbeix4 -1rQs7u4riqXJ+DJBmXt/wXUij0/k6s4igACNsT2MkZkGEDkzqzE+kj2VYOHSX+Wd -5W0WCfftcsIQ8eow4ACec/Ns9ionLjx1xnbTjRMkpGgbVsreupU9AQ4MhLNNgwyl -six/cxUxTvH8Modd0/4KQFkeo352A6+DKCaPZ87SoF2Rge1otcJaZVcX1gBvIztB -/xzYwyUydQEwblU0kCYWRgxlKP9jxFoke2RX1BodRfAMNDxS0XyYrA/JzB7ZRsS7 -QGYPy/PPb014U3KhpJdjwbNu2VaCVdGryYA9+BTP+Vzwcp8MZoMPnnjnBh1YyVAj -c7oyzKU5e5SVsYni1Kt/536YxQgFCAUHv/g+zQqqGEvyiMXhsCwVpoy7UcFYgmlw 
-40g3+ejwvlO3YA67gQQKebEv6/Laz1hVNiXT0m3okAXWxXgF/g2eBO5NTRdtaWn3 -VNH5ub+LOr6cMhk9BAtKgRG+xeh8/2SqH12UbwtlmxqnBAfHtqlE6yJ1ViMDHxF9 -101xJlEONmC3fcEAjShK6LEbFwPJns3WbGc0ds36CzXWtO29XGssr+YoiF9e3Eus -/XQjmjOJxRoWkNEYsxlJ3IRH2vUcdCoAp8IlD7JYxx8UBCSJDBo7/0QKU6INeWjo -5+aNbaLAJULSKo1LTZjjANm+G+KcSnbn5Ed8fmY+D61A5/7WMIVxq/uDLFvxCnRG -QcLbtqbPztiWwWZOuTwNTA3bfAhEG0ZzNr+0z33jW5T9ChvdutgxJMf3Khazx9cx -mWyCpJwtSv7hSbp4nCS2fmHCum2yIrOnou8TSOlQFERZ3UEZMgLpWeupH/W5C3Ad -rOspFrK6K8a/iNSs5OdYUIK2iHddTs5u7AEZ9I5MTuYnccuGuXfQTTA06TJvJTax -c2oDbXMnXs4pHLiiSRp34IHIYubdrj8X5vTODC5djl8h1167ToXo5zGdXqT1om+u -4ndNLbbI1vld5G7KAL6TlTETg+N7S8v3KYoBEWzykwgqqppWnWTqPWQxM8Iph5ly -AQlzz7feERi/h/s57RZ5ksoVAdbtk2U6wgHnLrWhKZ7+ZOAfpNAjGHwWyXTzylXo -zQ9A8Kmd0jBMsru4fsGpldf4lTsqO/abUSWrAAREGnlz/ZjEb944Yox7JUhWC15C -WxXK2rFbiF3S0HtEvU17rdn4HCsZBilnY+hTpHj1MA6O451/A3ghxGXFKz/9LUcS -YBRQJaSM3hTqC3WoTVBeVc5nCFOpu4F89JqhEgXOLKweueMbTMRSNm93tXWT13s3 -Q/o0pNJv/K6+bIQwsX/oDafMXcW7STxQJObbAleRbcn8/rGS2eEnVZ6907faUR/L -7eu9vgAa/jh9FHpZ0Q== ------END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes1_des.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes1_des.pem new file mode 100644 index 0000000000000..76d36a079055c --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes1_des.pem @@ -0,0 +1,29 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIE6TAbBgkqhkiG9w0BBQMwDgQIzMStkKSpOt0CAggABIIEyNdtzS+2uzAtCF35 +SEDSWBbtxkUAyQHxEjMxkzH8xE/60slG4yE3oPomRoE4hXOs1EUzwvKOJ7ykPXzc +zBuRTFns/b/A2ycfUOyTyblH0y2rXVcSN7Q/qVktcOoeg6mIuGHfXyJA4J9/neyy +SVGFNEWSa3kS6VtbS330deCrM6XXZmhusHMWmdmH+8jjEFrRksl/53P64VXO/THs +RtvtVJ8kGb49e38diMLuEpWbB9Pd1Nkqa4rgpcoYv2AuXXkBkDvs2iTsiSpCRuuQ +WpsKTNijVzv59LjLKki6JH7NxUef0C+MiYq+4Okbxk9apJXd5HsOEtGbVfTEOojT +bW03BHX4q/6BocdQLzMKILjkBs9XGIckRSFVId/+a1shAtAHISdIy0jq6tTCpScM +JIv8h3GhopF0EeVcij2KAEHTG3ky7RqpLZ6gTEUHDNhiNTZlMVNYo6vFk6agDzUx +Oh59UciG1t+LewpoLQy6n7PGcwoE9mTxXCWQt15RKVK+LKsWgShI4uBDhg4GMQfy +9CQT0YMXBpJQXPtf2cnpTCBFJptp1JtapnKBiHJzPmNPa0NK55dXPL6v89fFrCKp +3wurCYdNmLrS/qlhjva3/TXCfTZNQmCfl9DK+5q7nG1HTZYDfyPoAppITKbrUV9J +BSlFbBeIxqoN7bOpVhBXWYTSJBBokJED4mjwtFFMI5XE/4VTsqZ83ASjfv13xAsA +AJ+2kfjzFL2DXSXGiiPzipCeaZHFayJp6v5+81+76bXBwvrTLJ+/vvzXSOCBma2y +51qDxMhBwNewUdWsGUorcQ2lQzeRdeyFjeLNMgm3p8BXvTweae+raT9AJbtA1PvL +j7k312+DvFqg5VZkwM03kvTJeR2YBMQZub0e5biqIe8l8308Q8pZo9HTN4exGDfm +R67yLryCUPHcDfVdZoKmYeDv5h6e0cvnu3b+6ML2SVfMJ+yFqJFcWjQsr8furG0I ++3bACz9ykf5TcjjUV4ewQQp0uC0ryFBXxho1g3V4hJf86luEIqayoPLXkOCEhcy1 +oyY6h6tKXSlcYUHJagGqMyucRrs/mGcx4/YURyfxVNWQkCcUqA//+fEhWOBzFMiY +vkVWERW8KjlUtI+/kylUsgiJAIJpNJPXnltRDhdE/OT8kcxGYtl+CEIl/VN0N5nW +dNNoFN4E+zeR/KrEjhnRby/MpXHtNHVNVJbXARAkHsfCtWQ148ewuOx4IPMMiQ/C +z0DVv5GDP9+ObdW6kge/t9+fexcMH/DzfLDqO/qmP5xni4xP0JYU6wkzZ6k/F/Bz +s80PwJ/Y7LmT+1srqASm/Fetidb/shVh4VqKa5H8dYhnXfWKHghVR590qahAZtsm +1M7iqyYRW1nIplqtCPFCHSm/KDRb4Qwe/U+wjB5cYDFRfpoiZgfL+s7CWM0NU2Yp +RDW7ozCjf+w6XoCdlVZI2JppJA+zGWtUzznaTIe2RL9ioVxsREbnL/I+n0tzuLP3 +vqeyT4EfEGbcooEIPgJsARp0w20PqpqTgCMc42n4QWi6QDwdgAEP3Lob9mbYSrvp +OQWE3MxFEVfAyuaoBYJgP+aDY+UU55VwAfZeiW1+jfsvq7Uqcd9gVxwJdVA2zdlH +LXxWg4lHo5gLDVXHtQ== +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_aes.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_aes.pem new file mode 100644 index 0000000000000..005d4135e9215 --- /dev/null +++ 
b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_aes.pem @@ -0,0 +1,30 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQILJlhk+DPgdgCAggA +MB0GCWCGSAFlAwQBKgQQ/UWQTH4Tb5Lg6kkhJHSmmASCBNDFk3eueF24FbgZH0Es +LiX3B48Vgl4MK+s1uWfs81Vfd6azLlSGzlrgCUnNPm0RYCpUxtaFyHFEc4Rera6W +22x/r1GUgoGOJKRt4Cv9VmAFKybC+3owsN7/NKB3WD05fl+C3s3qpcv+iq9e83TG +aaFglWm5xzKurFQ8eqnpUMOg4wVLedqj57ySx81H/9wbWvnZf7+P0V96O53zZe7f +0LaV+DRgj9fHclqwTtrHT92GQKyzhVtBc012qN0IZCNQQJU64o1X+xGmwzhvLGLd +gFi4DtK41BM3jLvl2r0fV3j/jJr8IWmsNYzQt2DW/yNA/waqwuqyZVz/6d9Kn42h +g+LfdTj2IrtCHoUCJDc80v0e6zUaFu+KYyWqkGQUH8fNYppcZEhGUsezfB1Tthtr +Wb2axp/CkQKbMRRR6lrGK4swDuGvtofnWBHHb/ORJcmpDEs72njLYMPlljdfH1ra +PuXHeSKdLB6yec0uTtzAspeCkmXJ4lQBuKyWNgZo7s4xgbkbbQ78rGqOQKtvbjVQ +2joxF6lB7kUMgx3xDgRizI+J3eGPgRBjqHVonF8kbKx9aNlwXpCYRw8z+j6Hagah +cJbSOf3AynVcMZqDrpK0ZZQJyDoPQ4FzO/x26NdcIR2mWmGK45D+sApAgr2OESWI +YBjNyE3yA7UhlM0L5rJ8qMtbkx2/sdCndz015Wa+ZfKCmkBRFvdkBZOsc7dYzpeM +bgtIRUySOvw5QjRpjjghAzZmQMiSewozXZGnlDve3ftxUlxAbm/VPeKJr3H1M8gS +ysLQm0Le9QG3Hl17s/7Eo/ZcQyo5jH7aO2tdLUY+jiAahuDzBnrmqZWJIILxxPM+ +tlqt9kmslkUz1V/bYpl8cTB6Srd2M4cRc1nHXA887EtJs3siIxKHanwu9/xaOv9G +7OUwr/zfr77hFWZAvT/0Ln26p46TwD7nK44b/sbJhRkmXl/lb3znUbkH5U8qkIny +d6kRj6/5OjBIPqF4wEjt2v2OuaL5S31e6/up0yb2fazKfHqv0A4NI3zuPKiQH7Iy +yQjqIoNmGMEOEcLfkJl8ZSaxGdHAqzZCUEzin9Xjw3nMSWOsNo+SKp0LyjgDoidm +MuYtM7uSKr632SYEHNjgp/TNAuQ875ebq573VAd9DwL49/BY9H68GSKEGTv50IuT +V5r7wlK1lWkoHCbDNTa59R/PIr+1zxpZ+rYo9rSHOHYNp/lCYTRpQtoxk4jfpreA +QYVLQ+6iBhsZHL4JX0vvj0mBge3tELrOu+wNpnA0EWiqV6DCuBzNkZVUmDClU4y4 +jfQ+VxGdOZ6wbaPvl0ocsSqOvmdQiW4LNqF6RTIZ3q0dGzZq0CyVjhp/vqDCYRPU +RJjSjubjWOidKIQwc/az3uMF5wiJlwHmzlrAK0mmxbTa32sw9qz16E+UsbJw8ujy +E/aaJvli86mHlQIzO+K0u27c/7a960Jg8vdbmnYn20YbBwll9ivnoXbJ6/h22Fpw +jlu+anmUKXx0cyxQMe97VMqkOAJUCobAEjIrHm7XTLo29Dh/SaTy1spLAlTZ+12W +C2E65oJAVL+fh2BuNGCWG6AjMUaDnvoUbc0ClTsN138fLaBg8Qf3UaqwyZ7WmGEP +JhwAbR1VXQOHLlzJ5xqroNvDuw== +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_des.pem b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_des.pem new file mode 100644 index 0000000000000..110e7b5e2a27b --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/key_pkcs8_encrypted_pbes2_des.pem @@ -0,0 +1,29 @@ +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFCzA9BgkqhkiG9w0BBQ0wMDAbBgkqhkiG9w0BBQwwDgQISYrz2l4tmYACAggA +MBEGBSsOAwIHBAgqwuBynTK44ASCBMir0L/8IbtAjZCpBkHJ8ytKSP2KEivv+PsW +Xe+kwmKIIbvTgiVM7LVppTSpb6B0So0vOirm0asbKmEBj/KKmnzkbTRx/nbkPQGc +TnI1bseqo8JgjH8TglA3ozkpwNFZf4CMx8oV6yXvl66eKn6BMFbdLgVorqROhm6a +ONo2YbfvBZFHapwkBfS62nOAHs8KIqGQEwANimAwMQYpEHPTuDOpHGnNMM4AY2bu +84RqWfhMXgJtwJxjHCtSYCx05cNDpfiXjjKhr9YJwf+8fjooSmMPdZba2TeGiUWn +tUi4BL/fljk28gRUhLgcktmyoF+l2eYXNmdgJob6wKJSOhbONqxiGIGpiB527h8B +IF3unyJq/rZ44Af73i6GtNxZo5I+rXPa9uC0+159zhdGkToSLIlwUnQ9xTzgxNuc +gQhMKBlJoK1vOalfWtUHz+27JMmMYIzfdLdBA4mYb4YTwjebB7KIwEPJO6ufIXq/ +4t+TIbSSqCxkrqMIdBslB6ciOcYomLg6vK0xVnVgJA+XhzAgVmohuBZUmpSGbplE +2JXx2pKAjdiZ2X0xTJ3zbybNC3GdsbUskd4kUciJfSqSXLsm7crgJdSc1REhEI+k +mIpBU9yaEFN8S43TcQI6eB5rOVW0Tm8cPkH86S/XP1zYlgNh1zkqcO7Z0lYMTqur +3KEm2zqMUMwsOiRtwFXNirC7St73GTMFzmtdgLGywzxPas0gksSqOt1v/Gq9G7FQ +QJK4T3sOJeSMSIYzt/GIdB+Ha4wzKhaSWqdZnmskhTiXgIeLu0BRXgdY3lhg5Q+2 +xtR0TLVt26d+N03kj5HZ4Jl+DNWM3JY01ycZ/lk/CHERfpRLRGXXhXt01UloNNL2 +KmPdfbJhaCmsYnIXs02fHC6UdD5kXDRCNx9U4W9F0BAHU9brNNowtOgl4FzlTXYD +UEZk1zBPojOBfegaUiwOs60u8SMdHjgpz3AcG5YWp3idiLo9VhizYCDgv8XurSZD 
+KH+bCNgE9VMhsLZkyoOtZ5EmVTf2ZmTOIaTU2ddakhfLsAXQt30rAAU2DYAreDT0 +G2ZotpdxfIh98J0kVLrPKcOKxfshH55N6EWJK0YaXgvllBSzvR/l7EJU33A/FMPZ +bXWHfeRsyApXsefroMfqAuuyFAUYKFt7dPAyNpbmPw3hS9ZcKn7GSGrMKRisDuDM +bdrI2hCV/ylYIWUkj2GA/B1NGZ+iuYUKzlB37cjDeoqZhmVAY5Ns+U3+7FMAedGn +UmvM0U+nYWLx78EvzhxUZeSAgmct8NFlPLaxp964zogoMZq5Ah7RQ7fl5nr+NSBj +lPTcajhFBkwm1yEFpxzpL2LtMPEDbbInlEhKx+OEtxLTL3rlV0aZoRy4TYaToZvA +AukDSMg89XAKTSP/CtiKzO7b+iXficoFKSrUHLtyBqd5Wdv79SMfK8Jq0S9Tn5Nl +J4tI92uy/LIbR+MjWm8ViiCiRDThqGtbE+RHLY2JNsB7pSV0JLn13n1OQXi4SxF5 +s92J7C4fHX3pxziUNdMxYvXk6KYQrSBZe/Mrr0444Pmo94Sz1gwbRI4p+7r/A3h4 +5iVFoKf75q/fxww76gS0jMAoAnSEtYikKe/3c3PQWzdyh+uN/KqaliycRvyGf9s= +-----END ENCRYPTED PRIVATE KEY----- diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java index 8ec9e135acad1..b72f14613d28b 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/AbstractObjectParser.java @@ -156,7 +156,7 @@ public void declareField(BiConsumer consumer, CheckedFunction void declareObject(BiConsumer consumer, ContextParser objectParser, ParseField field) { - declareField(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT); + declareField(consumer, objectParser, field, ValueType.OBJECT); } /** @@ -240,7 +240,7 @@ public void declareBoolean(BiConsumer consumer, ParseField field } public void declareObjectArray(BiConsumer> consumer, ContextParser objectParser, ParseField field) { - declareFieldArray(consumer, (p, c) -> objectParser.parse(p, c), field, ValueType.OBJECT_ARRAY); + declareFieldArray(consumer, objectParser, field, ValueType.OBJECT_ARRAY); } /** diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java index 3a0f3b7056b4a..e8ed7cd1faf4b 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ConstructingObjectParser.java @@ -426,7 +426,7 @@ private class Target { /** * The parse context that is used for this invocation. Stored here so that it can be passed to the {@link #builder}. */ - private Context context; + private final Context context; /** * How many of the constructor parameters have we collected? We keep track of this so we don't have to count the diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/InstantiatingObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/InstantiatingObjectParser.java index 8c23a71965e73..89ccb670c5c3a 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/InstantiatingObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/InstantiatingObjectParser.java @@ -23,8 +23,12 @@ *

* The main differences being that it is using Builder to construct the parser and takes a class of the target object instead of the object * builder. The target object must have exactly one constructor with the number and order of arguments matching the number and order of - * declared fields. If there are more then 2 constructors with the same number of arguments, one of them needs to be marked with + * declared fields. If there are more than 2 constructors with the same number of arguments, one of them needs to be marked with * {@linkplain ParserConstructor} annotation. + * + * It is also possible for the constructor to accept Context as the first parameter; in this case, as with multiple constructors, + * the constructor is required to be marked with the {@linkplain ParserConstructor} annotation. + * *

  * {@code
  *   public static class Thing{
  *       public Thing(String animal, String vegetable, int mineral) {
@@ -37,14 +41,35 @@
  *
  *   }
  *
- *   private static final InstantiatingObjectParser<Thing, Void> PARSER = new InstantiatingObjectParser<>("thing", Thing.class);
+ *   private static final InstantiatingObjectParser<Thing, Void> PARSER;
+ *   static {
+ *       InstantiatingObjectParser.Builder<Thing, Void> parser =
+ *           InstantiatingObjectParser.builder("thing", true, Thing.class);
+ *       parser.declareString(constructorArg(), new ParseField("animal"));
+ *       parser.declareString(constructorArg(), new ParseField("vegetable"));
+ *       parser.declareInt(optionalConstructorArg(), new ParseField("mineral"));
+ *       parser.declareInt(Thing::setFruit, new ParseField("fruit"));
+ *       parser.declareInt(Thing::setBug, new ParseField("bug"));
+ *       PARSER = parser.build();
+ *   }
+ * }
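+ *
+ * A call to this parser might look like the following sketch ({@code xContentParser} is an illustrative name,
+ * not part of the API above; the {@code Void} context is simply unused):
+ *
+ * {@code
+ *   Thing parsed = PARSER.parse(xContentParser, null);
+ * }
+ *
+ * The next example shows a constructor that accepts the parse context as its first argument; a matching
+ * parse call is sketched after it.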
+ *
+ * {@code
+ *
+ *   public static class AnotherThing {
+ *       @ParserConstructor
+ *       public AnotherThing(SomeContext continent, String animal, String vegetable, int mineral) {
+ *           ....
+ *       }
+ *   }
+ *
+ *   private static final InstantiatingObjectParser<AnotherThing, SomeContext> PARSER;
  *   static {
- *       PARSER.declareString(constructorArg(), new ParseField("animal"));
- *       PARSER.declareString(constructorArg(), new ParseField("vegetable"));
- *       PARSER.declareInt(optionalConstructorArg(), new ParseField("mineral"));
- *       PARSER.declareInt(Thing::setFruit, new ParseField("fruit"));
- *       PARSER.declareInt(Thing::setBug, new ParseField("bug"));
- *       PARSER.finalizeFields()
+ *       InstantiatingObjectParser.Builder<AnotherThing, SomeContext> parser =
+ *           InstantiatingObjectParser.builder("thing", true, AnotherThing.class);
+ *       parser.declareString(constructorArg(), new ParseField("animal"));
+ *       parser.declareString(constructorArg(), new ParseField("vegetable"));
+ *       parser.declareInt(optionalConstructorArg(), new ParseField("mineral"));
+ *       PARSER = parser.build();
  *   }
  * }
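+ *
+ * As a usage sketch (variable names here are illustrative): the {@code SomeContext} instance handed to
+ * {@code parse} is forwarded to the annotated constructor as its first argument, ahead of the declared fields:
+ *
+ * {@code
+ *   AnotherThing parsed = PARSER.parse(xContentParser, someContext);
+ * }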
*/ @@ -72,7 +97,7 @@ public Builder(String name, Class valueClass) { } public Builder(String name, boolean ignoreUnknownFields, Class valueClass) { - this.constructingObjectParser = new ConstructingObjectParser<>(name, ignoreUnknownFields, this::build); + this.constructingObjectParser = new ConstructingObjectParser<>(name, ignoreUnknownFields, this::buildInstance); this.valueClass = valueClass; } @@ -87,9 +112,15 @@ public InstantiatingObjectParser build() { throw new IllegalArgumentException("More then one public constructor with @ParserConstructor annotation exist in " + "the class " + valueClass.getName()); } - if (c.getParameterCount() != neededArguments) { - throw new IllegalArgumentException("Annotated constructor doesn't have " + neededArguments + - " arguments in the class " + valueClass.getName()); + if (c.getParameterCount() < neededArguments || c.getParameterCount() > neededArguments + 1) { + throw new IllegalArgumentException( + "Annotated constructor doesn't have " + + neededArguments + + " or " + + (neededArguments + 1) + + " arguments in the class " + + valueClass.getName() + ); } constructor = c; } @@ -154,13 +185,20 @@ public void declareExclusiveFieldSet(String... exclusiveSet) { constructingObjectParser.declareExclusiveFieldSet(exclusiveSet); } - private Value build(Object[] args) { + private Value buildInstance(Object[] args, Context context) { if (constructor == null) { throw new IllegalArgumentException("InstantiatingObjectParser for type " + valueClass.getName() + " has to be finalized " + "before the first use"); } try { - return constructor.newInstance(args); + if (constructor.getParameterCount() != args.length) { + Object[] newArgs = new Object[args.length + 1]; + System.arraycopy(args, 0, newArgs, 1, args.length); + newArgs[0] = context; + return constructor.newInstance(newArgs); + } else { + return constructor.newInstance(args); + } } catch (Exception ex) { throw new IllegalArgumentException("Cannot instantiate an object of " + valueClass.getName(), ex); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java index f9aafcfb51f5a..6eb8bef86ebe7 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/ObjectParser.java @@ -170,7 +170,7 @@ public ObjectParser(String name) { * @param valueSupplier A supplier that creates a new Value instance. Used when the parser is used as an inner object parser. */ public ObjectParser(String name, @Nullable Supplier valueSupplier) { - this(name, errorOnUnknown(), c -> valueSupplier.get()); + this(name, errorOnUnknown(), wrapValueSupplier(valueSupplier)); } /** @@ -192,7 +192,13 @@ public static ObjectParser fromBuilder(String n * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier valueSupplier) { - this(name, ignoreUnknownFields ? ignoreUnknown() : errorOnUnknown(), c -> valueSupplier.get()); + this(name, ignoreUnknownFields ? ignoreUnknown() : errorOnUnknown(), wrapValueSupplier(valueSupplier)); + } + + private static Function wrapValueSupplier(@Nullable Supplier valueSupplier) { + return valueSupplier == null ? 
c -> { + throw new NullPointerException(); + } : c -> valueSupplier.get(); } /** @@ -202,7 +208,7 @@ public ObjectParser(String name, boolean ignoreUnknownFields, @Nullable Supplier * @param valueSupplier a supplier that creates a new Value instance used when the parser is used as an inner object parser. */ public ObjectParser(String name, UnknownFieldConsumer unknownFieldConsumer, @Nullable Supplier valueSupplier) { - this(name, consumeUnknownField(unknownFieldConsumer), c -> valueSupplier.get()); + this(name, consumeUnknownField(unknownFieldConsumer), wrapValueSupplier(valueSupplier)); } /** @@ -219,7 +225,7 @@ public ObjectParser( BiConsumer unknownFieldConsumer, @Nullable Supplier valueSupplier ) { - this(name, unknownIsNamedXContent(categoryClass, unknownFieldConsumer), c -> valueSupplier.get()); + this(name, unknownIsNamedXContent(categoryClass, unknownFieldConsumer), wrapValueSupplier(valueSupplier)); } /** @@ -232,7 +238,9 @@ private ObjectParser(String name, UnknownFieldParser unknownFiel @Nullable Function valueBuilder) { this.name = name; this.unknownFieldParser = unknownFieldParser; - this.valueBuilder = valueBuilder; + this.valueBuilder = valueBuilder == null ? c -> { + throw new NullPointerException("valueBuilder is not set"); + } : valueBuilder; } /** @@ -244,9 +252,6 @@ private ObjectParser(String name, UnknownFieldParser unknownFiel */ @Override public Value parse(XContentParser parser, Context context) throws IOException { - if (valueBuilder == null) { - throw new NullPointerException("valueBuilder is not set"); - } return parse(parser, valueBuilder.apply(context), context); } @@ -260,60 +265,52 @@ public Value parse(XContentParser parser, Context context) throws IOException { */ public Value parse(XContentParser parser, Value value, Context context) throws IOException { XContentParser.Token token; - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { - token = parser.currentToken(); - } else { + if (parser.currentToken() != XContentParser.Token.START_OBJECT) { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token); + throwExpectedStartObject(parser, token); } } FieldParser fieldParser = null; String currentFieldName = null; XContentLocation currentPosition = null; - List requiredFields = new ArrayList<>(this.requiredFieldSets); - List> exclusiveFields = new ArrayList<>(); - for (int i = 0; i < this.exclusiveFieldSets.size(); i++) { - exclusiveFields.add(new ArrayList<>()); + final List requiredFields = this.requiredFieldSets.isEmpty() ? 
null : new ArrayList<>(this.requiredFieldSets); + final List> exclusiveFields; + if (exclusiveFieldSets.isEmpty()) { + exclusiveFields = null; + } else { + exclusiveFields = new ArrayList<>(); + for (int i = 0; i < this.exclusiveFieldSets.size(); i++) { + exclusiveFields.add(new ArrayList<>()); + } } + final Map parsers = fieldParserMap.getOrDefault(parser.getRestApiVersion(), Collections.emptyMap()); while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); currentPosition = parser.getTokenLocation(); - fieldParser = fieldParserMap.getOrDefault(parser.getRestApiVersion(), Collections.emptyMap()) - .get(currentFieldName); + fieldParser = parsers.get(currentFieldName); } else { if (currentFieldName == null) { - throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); + throwNoFieldFound(parser); } if (fieldParser == null) { unknownFieldParser.acceptUnknownField(this, currentFieldName, currentPosition, parser, value, context); } else { fieldParser.assertSupports(name, parser, currentFieldName); - // Check to see if this field is a required field, if it is we can - // remove the entry as the requirement is satisfied - Iterator iter = requiredFields.iterator(); - while (iter.hasNext()) { - String[] requriedFields = iter.next(); - for (String field : requriedFields) { - if (field.equals(currentFieldName)) { - iter.remove(); - break; - } - } + if (requiredFields != null) { + // Check to see if this field is a required field, if it is we can + // remove the entry as the requirement is satisfied + maybeMarkRequiredField(currentFieldName, requiredFields); } - // Check if this field is in an exclusive set, if it is then mark - // it as seen. - for (int i = 0; i < this.exclusiveFieldSets.size(); i++) { - for (String field : this.exclusiveFieldSets.get(i)) { - if (field.equals(currentFieldName)) { - exclusiveFields.get(i).add(currentFieldName); - } - } + if (exclusiveFields != null) { + // Check if this field is in an exclusive set, if it is then mark + // it as seen. + maybeMarkExclusiveField(currentFieldName, exclusiveFields); } parseSub(parser, fieldParser, currentFieldName, value, context); @@ -322,26 +319,68 @@ public Value parse(XContentParser parser, Value value, Context context) throws I } } - // Check for a) multiple entries appearing in exclusive field sets and b) empty - // required field entries - StringBuilder message = new StringBuilder(); + // Check for a) multiple entries appearing in exclusive field sets and b) empty required field entries + if (exclusiveFields != null) { + ensureExclusiveFields(exclusiveFields); + } + if (requiredFields != null && requiredFields.isEmpty() == false) { + throwMissingRequiredFields(requiredFields); + } + return value; + } + + private void throwExpectedStartObject(XContentParser parser, XContentParser.Token token) { + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] Expected START_OBJECT but was: " + token); + } + + private void throwNoFieldFound(XContentParser parser) { + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] no field found"); + } + + private void throwMissingRequiredFields(List requiredFields) { + final StringBuilder message = new StringBuilder(); + for (String[] fields : requiredFields) { + message.append("Required one of fields ").append(Arrays.toString(fields)).append(", but none were specified. 
"); + } + throw new IllegalArgumentException(message.toString()); + } + + private void ensureExclusiveFields(List> exclusiveFields) { + StringBuilder message = null; for (List fieldset : exclusiveFields) { if (fieldset.size() > 1) { - message.append("The following fields are not allowed together: ").append(fieldset.toString()).append(" "); + if (message == null) { + message = new StringBuilder(); + } + message.append("The following fields are not allowed together: ").append(fieldset).append(" "); } } - if (message.length() > 0) { + if (message != null && message.length() > 0) { throw new IllegalArgumentException(message.toString()); } + } - if (requiredFields.isEmpty() == false) { - for (String[] fields : requiredFields) { - message.append("Required one of fields ").append(Arrays.toString(fields)).append(", but none were specified. "); + private void maybeMarkExclusiveField(String currentFieldName, List> exclusiveFields) { + for (int i = 0; i < this.exclusiveFieldSets.size(); i++) { + for (String field : this.exclusiveFieldSets.get(i)) { + if (field.equals(currentFieldName)) { + exclusiveFields.get(i).add(currentFieldName); + } } - throw new IllegalArgumentException(message.toString()); } + } - return value; + private void maybeMarkRequiredField(String currentFieldName, List requiredFields) { + Iterator iter = requiredFields.iterator(); + while (iter.hasNext()) { + String[] requiredFieldNames = iter.next(); + for (String field : requiredFieldNames) { + if (field.equals(currentFieldName)) { + iter.remove(); + break; + } + } + } } @Override @@ -427,14 +466,10 @@ public void declareNamedObject(BiConsumer consumer, NamedObjectPar assert token == XContentParser.Token.END_OBJECT; return namedObject; } catch (Exception e) { - throw new XContentParseException( - p.getTokenLocation(), - "[" + field + "] failed to parse field [" + currentName + "]", - e - ); + throw rethrowFieldParseFailure(field, p, currentName, e); } } catch (IOException e) { - throw new XContentParseException(p.getTokenLocation(), "[" + field + "] error while parsing named object", e); + throw wrapParseError(field, p, e, "error while parsing named object"); } }; @@ -447,8 +482,7 @@ public void declareNamedObjects(BiConsumer> consumer, NamedOb // This creates and parses the named object BiFunction objectParser = (XContentParser p, Context c) -> { if (p.currentToken() != XContentParser.Token.FIELD_NAME) { - throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " - + "fields or an array where each entry is an object with a single field"); + throw wrapCanBeObjectOrArrayOfObjects(field, p); } // This messy exception nesting has the nice side effect of telling the user which field failed to parse try { @@ -456,45 +490,59 @@ public void declareNamedObjects(BiConsumer> consumer, NamedOb try { return namedObjectParser.parse(p, c, currentName); } catch (Exception e) { - throw new XContentParseException( - p.getTokenLocation(), - "[" + field + "] failed to parse field [" + currentName + "]", - e - ); + throw rethrowFieldParseFailure(field, p, currentName, e); } } catch (IOException e) { - throw new XContentParseException(p.getTokenLocation(), "[" + field + "] error while parsing", e); + throw wrapParseError(field, p, e, "error while parsing"); } }; declareField((XContentParser p, Value v, Context c) -> { List fields = new ArrayList<>(); - XContentParser.Token token; if (p.currentToken() == XContentParser.Token.START_OBJECT) { // Fields are just named entries in a single object - 
while ((token = p.nextToken()) != XContentParser.Token.END_OBJECT) { + while (p.nextToken() != XContentParser.Token.END_OBJECT) { fields.add(objectParser.apply(p, c)); } } else if (p.currentToken() == XContentParser.Token.START_ARRAY) { // Fields are objects in an array. Each object contains a named field. - orderedModeCallback.accept(v); - while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token != XContentParser.Token.START_OBJECT) { - throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " - + "fields or an array where each entry is an object with a single field"); - } - p.nextToken(); // Move to the first field in the object - fields.add(objectParser.apply(p, c)); - p.nextToken(); // Move past the object, should be back to into the array - if (p.currentToken() != XContentParser.Token.END_OBJECT) { - throw new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " - + "fields or an array where each entry is an object with a single field"); - } - } + parseObjectsInArray(orderedModeCallback, field, objectParser, p, v, c, fields); } consumer.accept(v, fields); }, field, ValueType.OBJECT_ARRAY); } + private void parseObjectsInArray(Consumer orderedModeCallback, + ParseField field, BiFunction objectParser, + XContentParser p, Value v, Context c, + List fields) throws IOException { + orderedModeCallback.accept(v); + XContentParser.Token token; + while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.START_OBJECT) { + throw wrapCanBeObjectOrArrayOfObjects(field, p); + } + p.nextToken(); // Move to the first field in the object + fields.add(objectParser.apply(p, c)); + p.nextToken(); // Move past the object, should be back to into the array + if (p.currentToken() != XContentParser.Token.END_OBJECT) { + throw wrapCanBeObjectOrArrayOfObjects(field, p); + } + } + } + + private XContentParseException wrapCanBeObjectOrArrayOfObjects(ParseField field, XContentParser p) { + return new XContentParseException(p.getTokenLocation(), "[" + field + "] can be a single object with any number of " + + "fields or an array where each entry is an object with a single field"); + } + + private XContentParseException wrapParseError(ParseField field, XContentParser p, IOException e, String s) { + return new XContentParseException(p.getTokenLocation(), "[" + field + "] " + s, e); + } + + private XContentParseException rethrowFieldParseFailure(ParseField field, XContentParser p, String currentName, Exception e) { + return new XContentParseException(p.getTokenLocation(), "[" + field + "] failed to parse field [" + currentName + "]", e); + } + @Override public void declareNamedObjects(BiConsumer> consumer, NamedObjectParser namedObjectParser, ParseField field) { @@ -537,24 +585,24 @@ public void declareExclusiveFieldSet(String... 
exclusiveSet) { this.exclusiveFieldSets.add(exclusiveSet); } - private void parseArray(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) - throws IOException { + private void parseArray(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) { assert parser.currentToken() == XContentParser.Token.START_ARRAY : "Token was: " + parser.currentToken(); parseValue(parser, fieldParser, currentFieldName, value, context); } - private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) - throws IOException { + private void parseValue(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) { try { fieldParser.parser.parse(parser, value, context); } catch (Exception ex) { - throw new XContentParseException(parser.getTokenLocation(), - "[" + name + "] failed to parse field [" + currentFieldName + "]", ex); + throwFailedToParse(parser, currentFieldName, ex); } } - private void parseSub(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) - throws IOException { + private void throwFailedToParse(XContentParser parser, String currentFieldName, Exception ex) { + throw new XContentParseException(parser.getTokenLocation(), "[" + name + "] failed to parse field [" + currentFieldName + "]", ex); + } + + private void parseSub(XContentParser parser, FieldParser fieldParser, String currentFieldName, Value value, Context context) { final XContentParser.Token token = parser.currentToken(); switch (token) { case START_OBJECT: @@ -568,7 +616,7 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur * for having a cheap test. */ if (parser.currentToken() != XContentParser.Token.END_OBJECT) { - throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_OBJECT"); + throwMustEndOn(currentFieldName, XContentParser.Token.END_OBJECT); } break; case START_ARRAY: @@ -582,13 +630,13 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur * for having a cheap test. 
*/ if (parser.currentToken() != XContentParser.Token.END_ARRAY) { - throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on END_ARRAY"); + throwMustEndOn(currentFieldName, XContentParser.Token.END_ARRAY); } break; case END_OBJECT: case END_ARRAY: case FIELD_NAME: - throw new XContentParseException(parser.getTokenLocation(), "[" + name + "]" + token + " is unexpected"); + throw throwUnexpectedToken(parser, token); case VALUE_STRING: case VALUE_NUMBER: case VALUE_BOOLEAN: @@ -598,6 +646,14 @@ private void parseSub(XContentParser parser, FieldParser fieldParser, String cur } } + private void throwMustEndOn(String currentFieldName, XContentParser.Token token) { + throw new IllegalStateException("parser for [" + currentFieldName + "] did not end on " + token); + } + + private XContentParseException throwUnexpectedToken(XContentParser parser, XContentParser.Token token) { + return new XContentParseException(parser.getTokenLocation(), "[" + name + "]" + token + " is unexpected"); + } + private class FieldParser { private final Parser parser; private final EnumSet supportedTokens; diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java index d40bedf38b39f..227518b44c209 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContent.java @@ -82,6 +82,10 @@ XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationH XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, byte[] data, int offset, int length) throws IOException; + XContentParser createParser(NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, byte[] data, int offset, int length, FilterPath[] includes, + FilterPath[] excludes) throws IOException; + /** * Creates a parser over the provided reader. 
*/ diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/cbor/CborXContent.java index 9dfb6f47f7e86..d43e3b10b225c 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/cbor/CborXContent.java @@ -112,6 +112,26 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } + @Override + public XContentParser createParser( + NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, + byte[] data, + int offset, + int length, + FilterPath[] includes, + FilterPath[] excludes + ) throws IOException { + return new CborXContentParser( + xContentRegistry, + deprecationHandler, + cborFactory.createParser(new ByteArrayInputStream(data, offset, length)), + RestApiVersion.current(), + includes, + excludes + ); + } + @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/json/JsonXContent.java index cf551f5761315..10df2c1c10d8d 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/json/JsonXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/json/JsonXContent.java @@ -113,6 +113,26 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } + @Override + public XContentParser createParser( + NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, + byte[] data, + int offset, + int length, + FilterPath[] includes, + FilterPath[] excludes + ) throws IOException { + return new JsonXContentParser( + xContentRegistry, + deprecationHandler, + jsonFactory.createParser(new ByteArrayInputStream(data, offset, length)), + RestApiVersion.current(), + includes, + excludes + ); + } + @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/smile/SmileXContent.java index e02f8ec307af8..696865a242830 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/smile/SmileXContent.java @@ -114,6 +114,26 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } + @Override + public XContentParser createParser( + NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, + byte[] data, + int offset, + int length, + FilterPath[] includes, + FilterPath[] excludes + ) throws IOException { + return new SmileXContentParser( + xContentRegistry, + deprecationHandler, + smileFactory.createParser(new ByteArrayInputStream(data, offset, length)), + RestApiVersion.current(), + includes, + excludes + ); + } + @Override 
public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/yaml/YamlXContent.java index b3a684d20583d..68f1ac2bbf27b 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/yaml/YamlXContent.java @@ -106,6 +106,26 @@ public XContentParser createParser(NamedXContentRegistry xContentRegistry, return createParserForCompatibility(xContentRegistry, deprecationHandler, data, offset, length, RestApiVersion.current()); } + @Override + public XContentParser createParser( + NamedXContentRegistry xContentRegistry, + DeprecationHandler deprecationHandler, + byte[] data, + int offset, + int length, + FilterPath[] includes, + FilterPath[] excludes + ) throws IOException { + return new YamlXContentParser( + xContentRegistry, + deprecationHandler, + yamlFactory.createParser(new ByteArrayInputStream(data, offset, length)), + RestApiVersion.current(), + includes, + excludes + ); + } + @Override public XContentParser createParser(NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, Reader reader) throws IOException { diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java index e6662ca3d96bc..0025faa30b0dd 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java @@ -626,8 +626,8 @@ public void testCompatibleFieldDeclarations() throws IOException { RestApiVersion.minimumSupported()); StructWithCompatibleFields o = StructWithCompatibleFields.PARSER.parse(parser, null); assertEquals(1, o.intField); - assertWarnings(false, "[struct_with_compatible_fields][1:14] " + - "Deprecated field [old_name] used, expected [new_name] instead"); + assertWarnings(false, new DeprecationWarning(DeprecationLogger.CRITICAL, "[struct_with_compatible_fields][1:14] " + + "Deprecated field [old_name] used, expected [new_name] instead")); } } diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/InstantiatingObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/InstantiatingObjectParserTests.java index db155c2334851..34f02b373582e 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/InstantiatingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/InstantiatingObjectParserTests.java @@ -8,11 +8,8 @@ package org.elasticsearch.xcontent; -import org.elasticsearch.xcontent.InstantiatingObjectParser; -import org.elasticsearch.xcontent.ParseField; -import org.elasticsearch.xcontent.ParserConstructor; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; import java.util.Objects; @@ -217,8 +214,10 @@ public void testAnnotationWrongArgumentNumber() { InstantiatingObjectParser.Builder builder = InstantiatingObjectParser.builder("foo", Annotations.class); builder.declareInt(constructorArg(), new ParseField("a")); builder.declareString(constructorArg(), new ParseField("b")); + 
builder.declareInt(constructorArg(), new ParseField("c")); + builder.declareString(constructorArg(), new ParseField("d")); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); - assertThat(e.getMessage(), containsString("Annotated constructor doesn't have 2 arguments in the class")); + assertThat(e.getMessage(), containsString("Annotated constructor doesn't have 4 or 5 arguments in the class")); } public void testDoubleDeclarationThrowsException() throws IOException { @@ -240,4 +239,80 @@ class DoubleFieldDeclaration { assertThat(exception, instanceOf(IllegalArgumentException.class)); assertThat(exception.getMessage(), startsWith("Parser already registered for name=[name]")); } + + public static class ContextArgument { + final String context; + final int a; + final String b; + final long c; + + public ContextArgument() { + this(1, "2", 3); + } + + public ContextArgument(int a, String b) { + this(a, b, -1); + } + + + public ContextArgument(int a, String b, long c) { + this(null, a, b, c); + } + + public ContextArgument(String context, int a, String b, long c) { + this.context = context; + this.a = a; + this.b = b; + this.c = c; + } + + @ParserConstructor + public ContextArgument(String context, int a, String b, String c) { + this.context = context; + this.a = a; + this.b = b; + this.c = Long.parseLong(c); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ContextArgument that = (ContextArgument) o; + return a == that.a && + c == that.c && + Objects.equals(b, that.b); + } + + @Override + public int hashCode() { + return Objects.hash(a, b, c); + } + } + + public void testContextAsArgument() throws IOException { + InstantiatingObjectParser.Builder builder = InstantiatingObjectParser.builder( + "foo", + ContextArgument.class + ); + builder.declareInt(constructorArg(), new ParseField("a")); + builder.declareString(constructorArg(), new ParseField("b")); + builder.declareString(constructorArg(), new ParseField("c")); + InstantiatingObjectParser parser = builder.build(); + try (XContentParser contentParser = createParser(JsonXContent.jsonXContent, "{\"a\": 5, \"b\":\"6\", \"c\": \"7\"}")) { + assertThat(parser.parse(contentParser, "context"), equalTo(new ContextArgument("context", 5, "6", 7))); + } + } + + public void testContextAsArgumentWrongArgumentNumber() { + InstantiatingObjectParser.Builder builder = InstantiatingObjectParser.builder( + "foo", + ContextArgument.class + ); + builder.declareInt(constructorArg(), new ParseField("a")); + builder.declareString(constructorArg(), new ParseField("b")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, builder::build); + assertThat(e.getMessage(), containsString("Annotated constructor doesn't have 2 or 3 arguments in the class")); + } + } diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ObjectParserTests.java index 278702a4bb1e0..8882d38358f0c 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ObjectParserTests.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.xcontent; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.RestApiVersion; @@ -211,7 
+212,8 @@ class TestStruct { objectParser.declareField((i, v, c) -> v.test = i.text(), new ParseField("test", "old_test"), ObjectParser.ValueType.STRING); objectParser.parse(parser, s, null); assertEquals("foo", s.test); - assertWarnings(false, "[foo][1:15] Deprecated field [old_test] used, expected [test] instead"); + assertWarnings(false, new DeprecationWarning(DeprecationLogger.CRITICAL, "[foo][1:15] Deprecated field [old_test] used, " + + "expected [test] instead")); } public void testFailOnValueType() throws IOException { @@ -1072,8 +1074,8 @@ public void testCompatibleFieldDeclarations() throws IOException { RestApiVersion.minimumSupported()); StructWithCompatibleFields o = StructWithCompatibleFields.PARSER.parse(parser, null); assertEquals(1, o.intField); - assertWarnings(false, "[struct_with_compatible_fields][1:14] " + - "Deprecated field [old_name] used, expected [new_name] instead"); + assertWarnings(false, new DeprecationWarning(DeprecationLogger.CRITICAL, "[struct_with_compatible_fields][1:14] " + + "Deprecated field [old_name] used, expected [new_name] instead")); } } diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java similarity index 98% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java index 87d4d92c7a910..af7e9aae149d8 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/AbstractXContentFilteringTestCase.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/AbstractXContentFilteringTestCase.java @@ -6,10 +6,11 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.support.AbstractFilteringTestCase; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContent; @@ -17,8 +18,6 @@ import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; -import org.elasticsearch.common.xcontent.support.AbstractFilteringTestCase; -import org.elasticsearch.xcontent.support.filtering.FilterPath; import java.io.IOException; import java.util.Arrays; @@ -142,6 +141,8 @@ static void assertXContentBuilderAsBytes(final XContentBuilder expected, final X assertThat(jsonParser.numberType(), equalTo(testParser.numberType())); assertThat(jsonParser.numberValue(), equalTo(testParser.numberValue())); break; + default: + break; } } } catch (Exception e) { diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborXContentFilteringTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/CborXContentFilteringTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborXContentFilteringTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/CborXContentFilteringTests.java index ce4c5d005c759..5b2dce8e10106 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/CborXContentFilteringTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/CborXContentFilteringTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java index 3d73c8717e7ef..ad669a3e61b5d 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathGeneratorFilteringTests.java @@ -6,14 +6,14 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.filter.FilteringGeneratorDelegate; + import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.support.filtering.FilterPathBasedFilter; import java.util.Collections; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathTests.java index 2046772e0afcf..c8a65e90a4c3a 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/FilterPathTests.java @@ -6,10 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xcontent.support.filtering.FilterPath; import java.util.Arrays; import java.util.LinkedHashSet; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/JsonXContentFilteringTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/JsonXContentFilteringTests.java index fffdbb2ad8818..5a27954754d43 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/JsonXContentFilteringTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/JsonXContentFilteringTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/SmileFilteringGeneratorTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/SmileFilteringGeneratorTests.java index 7c54668d17192..13efcc0738949 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/SmileFilteringGeneratorTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/SmileFilteringGeneratorTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/YamlFilteringGeneratorTests.java similarity index 93% rename from server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java rename to libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/YamlFilteringGeneratorTests.java index 00769671707a2..ada8b696c5d64 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/support/filtering/YamlFilteringGeneratorTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/support/filtering/YamlFilteringGeneratorTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.common.xcontent.support.filtering; +package org.elasticsearch.xcontent.support.filtering; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_versioned_update.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_versioned_update.yml index 780f33be52dc0..68fa8d6c86014 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_versioned_update.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_versioned_update.yml @@ -1,8 +1,8 @@ --- "Test pipeline versioned updates": - skip: - version: " - 7.99.99" - reason: "re-enable in 7.16+ when backported" + version: " - 7.15.99" + reason: "added versioned updates in 7.16.0" - do: ingest.put_pipeline: diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index b875c88a28a48..36de08b9a4d7e 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -470,8 +470,8 @@ public Processor create( boolean valid = metadata.isValid(currentState.metadata().settings()); if (valid && metadata.isCloseToExpiration()) { - HeaderWarning.addWarning("database [{}] was not updated for over 25 days, geoip processor will stop working if there " + - "is no update for 30 days", databaseFile); + HeaderWarning.addWarning(DeprecationLogger.CRITICAL, "database [{}] was not updated for over 25 days, geoip processor" + + " will stop working if there is no update for 30 days", databaseFile); } return valid; diff --git a/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-2719cf6630e.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..0fb855d15dd41 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +293fe996ddfcd57d1431bb73f0b59d077b946374 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 
fe9352ca233c8..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.0.0-snapshot-cfd9f9f98f7.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3bc34dea0b46e0f6429b054f848e2611d6e1d3e7
\ No newline at end of file
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
index 61986fab9423c..cf8f1ff2560b5 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/org.elasticsearch.script.fields.txt
@@ -13,11 +13,13 @@ class org.elasticsearch.script.field.Field @dynamic_type {
   String getName()
   boolean isEmpty()
   int size()
-  List getValues()
-  def getValue(def)
-  def getValue(int, def)
 }
 
 class org.elasticsearch.script.DocBasedScript {
-  org.elasticsearch.script.field.Field field(String)
+    org.elasticsearch.script.field.Field field(String)
+}
+
+class org.elasticsearch.script.field.DelegateDocValuesField @dynamic_type {
+    def getValue(def)
+    List getValues()
 }
diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml
index 3a3e5608e02bf..7c3a9d975efeb 100644
--- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml
+++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/20_scriptfield.yml
@@ -154,3 +154,32 @@ setup:
   - match: { error.failed_shards.0.reason.caused_by.reason: "no paths escape from while loop" }
   - match: { error.failed_shards.0.reason.type: "script_exception" }
   - match: { error.failed_shards.0.reason.reason: "compile error" }
+
+---
+"Scripted Field with error accessing an unsupported field via the script fields api":
+  - do:
+      catch: bad_request
+      search:
+        rest_total_hits_as_int: true
+        body:
+          script_fields:
+            bar:
+              script:
+                source: "field('foo').getValue('')"
+
+  - match: { error.failed_shards.0.reason.caused_by.type: "unsupported_operation_exception" }
+  - match: { error.failed_shards.0.reason.caused_by.reason: "field [foo] is not supported through the fields api, use [doc] instead"}
+
+  - do:
+      catch: bad_request
+      search:
+        rest_total_hits_as_int: true
+        body:
+          script_fields:
+            bar:
+              script:
+                source: "field('foo').getValues()"
+
+  - match: { error.failed_shards.0.reason.caused_by.type: "unsupported_operation_exception" }
+  - match: { error.failed_shards.0.reason.caused_by.reason: "field [foo] is not supported through the fields api, use [doc] instead" }
+
diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-2719cf6630e.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-2719cf6630e.jar.sha1
new file mode 100644
index 0000000000000..3347628c0e8f6
--- /dev/null
+++ b/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-2719cf6630e.jar.sha1
@@ -0,0 +1 @@
+16f30f67a13f3a947000bbe1a189b0bdbccc6ec3
\ No newline at end of file
diff --git a/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-cfd9f9f98f7.jar.sha1
deleted file mode 100644
index 1c96b8e73ff65..0000000000000
--- a/modules/legacy-geo/licenses/lucene-spatial-extras-9.0.0-snapshot-cfd9f9f98f7.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d9f8584667012bf09c446512ee25191b5d00ceed
\ No newline at end of file
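The whitelist hunk above narrows the Painless script fields API: getValue and getValues now live on DelegateDocValuesField rather than on the base Field class, so fields without doc-values backing fail with the unsupported_operation_exception the YAML tests assert. A minimal sketch of the delegation idea follows, using a hypothetical class rather than the real org.elasticsearch.script.field types:

    import java.util.Collections;
    import java.util.List;

    // Hypothetical stand-in: only doc-values-backed fields get a delegate like
    // this, so scripts can call getValue()/getValues() on them. Fields without
    // doc-values backing have no delegate, and surface an
    // UnsupportedOperationException pointing the script author at [doc] instead.
    final class SketchDelegateDocValuesField {
        private final List<Object> values;

        SketchDelegateDocValuesField(List<Object> values) {
            this.values = values;
        }

        Object getValue(Object defaultValue) {
            return values.isEmpty() ? defaultValue : values.get(0);
        }

        List<Object> getValues() {
            return Collections.unmodifiableList(values);
        }
    }
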
diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
index ff585b416b72c..dc7575728fa86 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
+++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
@@ -18,6 +18,8 @@
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.field.DelegateDocValuesField;
+import org.elasticsearch.script.field.DocValuesField;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentParser.Token;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -505,8 +507,8 @@ private static class ScaledFloatLeafFieldData implements LeafNumericFieldData {
         }
 
         @Override
-        public ScriptDocValues.Doubles getScriptValues() {
-            return new ScriptDocValues.Doubles(getDoubleValues());
+        public DocValuesField getScriptField(String name) {
+            return new DelegateDocValuesField(new ScriptDocValues.Doubles(getDoubleValues()), name);
         }
 
         @Override
diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java
index 33cfb45df091b..a74ea304273e3 100644
--- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java
+++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryBuilderStoreTests.java
@@ -30,7 +30,7 @@
 import org.elasticsearch.index.mapper.TestDocumentParserContext;
 import org.elasticsearch.index.query.SearchExecutionContext;
 import org.elasticsearch.index.query.TermQueryBuilder;
-import org.elasticsearch.mock.orig.Mockito;
+import org.mockito.Mockito;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.aggregations.support.CoreValuesSourceType;
 import org.elasticsearch.test.ESTestCase;
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
index 2a8f0a6a9e12d..a06f9a0ac2b45 100644
--- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java
@@ -96,6 +96,7 @@ public void setUpCluster() {
         internalCluster().startMasterOnlyNode();
         // Use a single thread pool for writes so we can enforce a consistent ordering
         internalCluster().startDataOnlyNode(Settings.builder().put("thread_pool.write.size", 1).build());
+        internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
     }
 
     public void testUpdateByQuery() throws Exception {
@@ -214,7 +215,12 @@ Self extends AbstractBulkByScrollRequestBuilder> void executeConcurrent
         }
 
         // The bulk request is enqueued before the update by query
-        final ActionFuture<BulkResponse> bulkFuture = client().bulk(conflictingUpdatesBulkRequest);
+        // Since #77731, TransportBulkAction is dispatched onto the write thread pool,
+        // and this test relies on a deterministic task order in the data node's write
+        // thread pool. To preserve that ordering, execute the TransportBulkAction on a
+        // coordinating-only node so that no additional tasks are scheduled onto the
+        // data node's write thread pool.
+        final ActionFuture<BulkResponse> bulkFuture = internalCluster().coordOnlyNodeClient().bulk(conflictingUpdatesBulkRequest);
 
         // Ensure that the concurrent writes are enqueued before the update by query request is sent
         assertBusy(() -> assertThat(writeThreadPool.getQueue().size(), equalTo(1)));
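The change above pins task ordering by giving the data node a single write thread and dispatching the bulk request from a coordinating-only node. A condensed sketch of the pattern for reference, assuming it sits in an ESIntegTestCase test body where writeThreadPool and bulkRequest are already defined:

    // One write thread on the data node makes the queue order deterministic.
    internalCluster().startDataOnlyNode(Settings.builder().put("thread_pool.write.size", 1).build());
    internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);

    // Dispatching through the coordinating-only client keeps TransportBulkAction's
    // coordination work (see #77731) off the data node's write thread pool, so the
    // only queued task there is the bulk write itself.
    ActionFuture<BulkResponse> bulkFuture = internalCluster().coordOnlyNodeClient().bulk(bulkRequest);
    assertBusy(() -> assertThat(writeThreadPool.getQueue().size(), equalTo(1)));
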
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java
new file mode 100644
index 0000000000000..09765b472c770
--- /dev/null
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java
@@ -0,0 +1,388 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.migration;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusAction;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeAction;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeRequest;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.SystemIndexDescriptor;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.SystemIndexPlugin;
+import org.elasticsearch.reindex.ReindexPlugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.upgrades.FeatureMigrationResults;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import
java.util.stream.Collectors; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class FeatureMigrationIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build(); + } + + @Override + protected boolean forbidPrivateIndexSettings() { + // We need to be able to set the index creation version manually. + return false; + } + + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + public void testMigrateInternalManagedSystemIndex() throws Exception { + createSystemIndexForDescriptor(INTERNAL_MANAGED); + createSystemIndexForDescriptor(INTERNAL_UNMANAGED); + createSystemIndexForDescriptor(EXTERNAL_MANAGED); + createSystemIndexForDescriptor(EXTERNAL_UNMANAGED); + + ensureGreen(); + + SetOnce preUpgradeHookCalled = new SetOnce<>(); + SetOnce postUpgradeHookCalled = new SetOnce<>(); + TestPlugin.preMigrationHook.set(clusterState -> { + // Check that the ordering of these calls is correct. + assertThat(postUpgradeHookCalled.get(), nullValue()); + Map metadata = new HashMap<>(); + metadata.put("stringKey", "stringValue"); + metadata.put("intKey", 42); + { + Map innerMetadata = new HashMap<>(); + innerMetadata.put("innerKey", "innerValue"); + + metadata.put("mapKey", innerMetadata); + } + metadata.put("listKey", Arrays.asList(1, 2, 3, 4)); + preUpgradeHookCalled.set(true); + return metadata; + }); + + TestPlugin.postMigrationHook.set((clusterState, metadata) -> { + assertThat(preUpgradeHookCalled.get(), is(true)); + + assertThat( + metadata, + hasEntry("stringKey", "stringValue") + ); + assertThat(metadata, hasEntry("intKey", 42)); + assertThat(metadata, hasEntry("listKey", Arrays.asList(1,2,3,4))); + assertThat(metadata, hasKey("mapKey")); + @SuppressWarnings("unchecked") + Map innerMap = (Map) metadata.get("mapKey"); + assertThat(innerMap, hasEntry("innerKey", "innerValue")); + + // We shouldn't have any results in the cluster state as no features have fully finished yet. 
+            FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE);
+            assertThat(currentResults, nullValue());
+            postUpgradeHookCalled.set(true);
+        });
+
+        PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest();
+        PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get();
+        assertThat(migrationResponse.getReason(), nullValue());
+        assertThat(migrationResponse.getElasticsearchException(), nullValue());
+        final Set<String> migratingFeatures = migrationResponse.getFeatures()
+            .stream()
+            .map(PostFeatureUpgradeResponse.Feature::getFeatureName)
+            .collect(Collectors.toSet());
+        assertThat(migratingFeatures, hasItem(FEATURE_NAME));
+
+        GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest();
+        assertBusy(() -> {
+            GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest)
+                .get();
+            logger.info(Strings.toString(statusResponse));
+            assertThat(statusResponse.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED));
+        });
+
+        assertTrue("the pre-migration hook wasn't actually called", preUpgradeHookCalled.get());
+        assertTrue("the post-migration hook wasn't actually called", postUpgradeHookCalled.get());
+
+        Metadata finalMetadata = client().admin().cluster().prepareState().get().getState().metadata();
+        // Check that the results metadata is what we expect.
+        FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE);
+        assertThat(currentResults, notNullValue());
+        assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME)));
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true));
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue());
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue());
+
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".int-man-old-reindexed-for-8",
+            INTERNAL_MANAGED_FLAG_VALUE,
+            true,
+            true,
+            Arrays.asList(".int-man-old", ".internal-managed-alias")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".int-unman-old-reindexed-for-8",
+            INTERNAL_UNMANAGED_FLAG_VALUE,
+            false,
+            true,
+            Collections.singletonList(".int-unman-old")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".ext-man-old-reindexed-for-8",
+            EXTERNAL_MANAGED_FLAG_VALUE,
+            true,
+            false,
+            Arrays.asList(".ext-man-old", ".external-managed-alias")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".ext-unman-old-reindexed-for-8",
+            EXTERNAL_UNMANAGED_FLAG_VALUE,
+            false,
+            false,
+            Collections.singletonList(".ext-unman-old")
+        );
+    }
+
+    public void assertIndexHasCorrectProperties(
+        Metadata metadata,
+        String indexName,
+        int settingsFlagValue,
+        boolean isManaged,
+        boolean isInternal,
+        Collection<String> aliasNames
+    ) {
+        IndexMetadata imd = metadata.index(indexName);
+        assertThat(imd.getSettings().get(FLAG_SETTING_KEY), equalTo(Integer.toString(settingsFlagValue)));
+        final Map<String, Object> mapping = imd.mapping().getSourceAsMap();
+        @SuppressWarnings("unchecked")
+        final Map<String, Object> meta = (Map<String, Object>) mapping.get("_meta");
+        assertThat(meta.get(DESCRIPTOR_MANAGED_META_KEY), is(isManaged));
+        assertThat(meta.get(DESCRIPTOR_INTERNAL_META_KEY), is(isInternal));
+
+        assertThat(imd.isSystem(), is(true));
+
+        Set<String> actualAliasNames = imd.getAliases().keySet();
+        assertThat(actualAliasNames, containsInAnyOrder(aliasNames.toArray()));
+
+        IndicesStatsResponse indexStats = client().admin().indices().prepareStats(imd.getIndex().getName()).setDocs(true).get();
+        assertThat(indexStats.getIndex(imd.getIndex().getName()).getTotal().getDocs().getCount(), is((long) INDEX_DOC_COUNT));
+    }
+
+    public void createSystemIndexForDescriptor(SystemIndexDescriptor descriptor) throws InterruptedException {
+        assertTrue(
+            "the strategy used below to create index names for descriptors without a primary index name only works for simple patterns",
+            descriptor.getIndexPattern().endsWith("*")
+        );
+        String indexName = Optional.ofNullable(descriptor.getPrimaryIndex()).orElse(descriptor.getIndexPattern().replace("*", "old"));
+        CreateIndexRequestBuilder createRequest = prepareCreate(indexName);
+        createRequest.setWaitForActiveShards(ActiveShardCount.ALL);
+        if (descriptor.getSettings() != null) {
+            createRequest.setSettings(Settings.builder().put("index.version.created", Version.CURRENT).build());
+        } else {
+            createRequest.setSettings(
+                createSimpleSettings(
+                    Version.V_7_0_0,
+                    descriptor.isInternal() ? INTERNAL_UNMANAGED_FLAG_VALUE : EXTERNAL_UNMANAGED_FLAG_VALUE
+                )
+            );
+        }
+        if (descriptor.getMappings() == null) {
+            createRequest.setMapping(createSimpleMapping(false, descriptor.isInternal()));
+        }
+        CreateIndexResponse response = createRequest.get();
+        assertTrue(response.isShardsAcknowledged());
+
+        List<IndexRequestBuilder> docs = new ArrayList<>(INDEX_DOC_COUNT);
+        for (int i = 0; i < INDEX_DOC_COUNT; i++) {
+            docs.add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words"));
+        }
+        indexRandom(true, docs);
+        IndicesStatsResponse indexStats = client().admin().indices().prepareStats(indexName).setDocs(true).get();
+        assertThat(indexStats.getIndex(indexName).getTotal().getDocs().getCount(), is((long) INDEX_DOC_COUNT));
+    }
+
+    static final String VERSION_META_KEY = "version";
+    static final Version META_VERSION = Version.CURRENT;
+    static final String DESCRIPTOR_MANAGED_META_KEY = "descriptor_managed";
+    static final String DESCRIPTOR_INTERNAL_META_KEY = "descriptor_internal";
+    static final String FEATURE_NAME = "A-test-feature"; // Sorts alphabetically before the feature from MultiFeatureMigrationIT
+    static final String ORIGIN = FeatureMigrationIT.class.getSimpleName();
+    static final String FLAG_SETTING_KEY = IndexMetadata.INDEX_PRIORITY_SETTING.getKey();
+    static final int INDEX_DOC_COUNT = 100; // arbitrarily chosen
+
+    static final int INTERNAL_MANAGED_FLAG_VALUE = 1;
+    static final int INTERNAL_UNMANAGED_FLAG_VALUE = 2;
+    static final int EXTERNAL_MANAGED_FLAG_VALUE = 3;
+    static final int EXTERNAL_UNMANAGED_FLAG_VALUE = 4;
+    static final SystemIndexDescriptor INTERNAL_MANAGED = SystemIndexDescriptor.builder()
+        .setIndexPattern(".int-man-*")
+        .setAliasName(".internal-managed-alias")
+        .setPrimaryIndex(".int-man-old")
+        .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED)
+        .setSettings(createSimpleSettings(Version.V_7_0_0, INTERNAL_MANAGED_FLAG_VALUE))
+        .setMappings(createSimpleMapping(true, true))
+        .setOrigin(ORIGIN)
+        .setVersionMetaKey(VERSION_META_KEY)
+        .setAllowedElasticProductOrigins(Collections.emptyList())
+        .setMinimumNodeVersion(Version.V_7_0_0)
+        .setPriorSystemIndexDescriptors(Collections.emptyList())
+        .build();
+    static final SystemIndexDescriptor INTERNAL_UNMANAGED = SystemIndexDescriptor.builder()
+        .setIndexPattern(".int-unman-*")
+        .setType(SystemIndexDescriptor.Type.INTERNAL_UNMANAGED)
+        .setOrigin(ORIGIN)
+        .setVersionMetaKey(VERSION_META_KEY)
+        .setAllowedElasticProductOrigins(Collections.emptyList())
+        .setMinimumNodeVersion(Version.V_7_0_0)
+        .setPriorSystemIndexDescriptors(Collections.emptyList())
+        .build();
+    static final SystemIndexDescriptor EXTERNAL_MANAGED = SystemIndexDescriptor.builder()
+        .setIndexPattern(".ext-man-*")
+        .setAliasName(".external-managed-alias")
+        .setPrimaryIndex(".ext-man-old")
+        .setType(SystemIndexDescriptor.Type.EXTERNAL_MANAGED)
+        .setSettings(createSimpleSettings(Version.V_7_0_0, EXTERNAL_MANAGED_FLAG_VALUE))
+        .setMappings(createSimpleMapping(true, false))
+        .setOrigin(ORIGIN)
+        .setVersionMetaKey(VERSION_META_KEY)
+        .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN))
+        .setMinimumNodeVersion(Version.V_7_0_0)
+        .setPriorSystemIndexDescriptors(Collections.emptyList())
+        .build();
+    static final SystemIndexDescriptor EXTERNAL_UNMANAGED = SystemIndexDescriptor.builder()
+        .setIndexPattern(".ext-unman-*")
+        .setType(SystemIndexDescriptor.Type.EXTERNAL_UNMANAGED)
+        .setOrigin(ORIGIN)
+        .setVersionMetaKey(VERSION_META_KEY)
+        .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN))
+        .setMinimumNodeVersion(Version.V_7_0_0)
+        .setPriorSystemIndexDescriptors(Collections.emptyList())
+        .build();
+
+    static Settings createSimpleSettings(Version creationVersion, int flagSettingValue) {
+        return Settings.builder()
+            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
+            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
+            .put(FLAG_SETTING_KEY, flagSettingValue)
+            .put("index.version.created", creationVersion)
+            .build();
+    }
+
+    static String createSimpleMapping(boolean descriptorManaged, boolean descriptorInternal) {
+        try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+            builder.startObject();
+            {
+                builder.startObject("_meta");
+                builder.field(VERSION_META_KEY, META_VERSION);
+                builder.field(DESCRIPTOR_MANAGED_META_KEY, descriptorManaged);
+                builder.field(DESCRIPTOR_INTERNAL_META_KEY, descriptorInternal);
+                builder.endObject();
+
+                builder.field("dynamic", "strict");
+                builder.startObject("properties");
+                {
+                    builder.startObject("some_field");
+                    builder.field("type", "keyword");
+                    builder.endObject();
+                }
+                builder.endObject();
+            }
+            builder.endObject();
+            return Strings.toString(builder);
+        } catch (IOException e) {
+            // Just rethrow, it should be impossible for this to throw here
+            throw new AssertionError(e);
+        }
+    }
+
+    public static class TestPlugin extends Plugin implements SystemIndexPlugin {
+        public static final AtomicReference<Function<ClusterState, Map<String, Object>>> preMigrationHook = new AtomicReference<>();
+        public static final AtomicReference<BiConsumer<ClusterState, Map<String, Object>>> postMigrationHook = new AtomicReference<>();
+
+        public TestPlugin() {
+
+        }
+
+        @Override
+        public String getFeatureName() {
+            return FEATURE_NAME;
+        }
+
+        @Override
+        public String getFeatureDescription() {
+            return "a plugin for testing system index migration";
+        }
+
+        @Override
+        public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+            return Arrays.asList(INTERNAL_MANAGED, INTERNAL_UNMANAGED, EXTERNAL_MANAGED, EXTERNAL_UNMANAGED);
+        }
+
+        @Override
+        public void prepareForIndicesMigration(ClusterService clusterService, Client client, ActionListener<Map<String, Object>> listener) {
+            listener.onResponse(preMigrationHook.get().apply(clusterService.state()));
+        }
+
+        @Override
+        public void indicesMigrationComplete(
+            Map<String, Object> preUpgradeMetadata,
+            ClusterService clusterService,
+            Client client,
+            ActionListener<Boolean> listener
+        ) {
+            postMigrationHook.get().accept(clusterService.state(), preUpgradeMetadata);
+            listener.onResponse(true);
+        }
+    }
+}
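The TestPlugin above doubles as a reference for the SystemIndexPlugin migration-hook contract: prepareForIndicesMigration hands back a metadata map before any of the feature's indices are reindexed, and indicesMigrationComplete receives that same map once migration finishes. A minimal stand-alone sketch of a plugin using the hooks (hypothetical feature name and "writersPaused" flag; assumes the interface's remaining methods keep their defaults; error handling omitted):

    import java.util.Collections;
    import java.util.Map;

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.cluster.service.ClusterService;
    import org.elasticsearch.plugins.Plugin;
    import org.elasticsearch.plugins.SystemIndexPlugin;

    public class SketchMigrationPlugin extends Plugin implements SystemIndexPlugin {
        @Override
        public String getFeatureName() {
            return "sketch-feature"; // hypothetical
        }

        @Override
        public String getFeatureDescription() {
            return "sketch of the system index migration hooks";
        }

        @Override
        public void prepareForIndicesMigration(ClusterService clusterService, Client client,
                                               ActionListener<Map<String, Object>> listener) {
            // Quiesce the feature here, then hand back any state to carry across.
            listener.onResponse(Collections.singletonMap("writersPaused", true));
        }

        @Override
        public void indicesMigrationComplete(Map<String, Object> preUpgradeMetadata, ClusterService clusterService,
                                             Client client, ActionListener<Boolean> listener) {
            // The exact map returned above is delivered here once reindexing is done.
            listener.onResponse(Boolean.TRUE.equals(preUpgradeMetadata.get("writersPaused")));
        }
    }
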
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java
new file mode 100644
index 0000000000000..832990a973e53
--- /dev/null
+++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java
@@ -0,0 +1,302 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.migration;
+
+import org.apache.lucene.util.SetOnce;
+import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusAction;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest;
+import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeAction;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeRequest;
+import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.indices.SystemIndexDescriptor;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.SystemIndexPlugin;
+import org.elasticsearch.reindex.ReindexPlugin;
+import org.elasticsearch.upgrades.FeatureMigrationResults;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.aMapWithSize;
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasItems;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+
+public class MultiFeatureMigrationIT extends FeatureMigrationIT {
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build();
+    }
+
+    @Override
+    protected boolean forbidPrivateIndexSettings() {
+        // We need to be able to set the index creation version manually.
+        return false;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
+        plugins.add(FeatureMigrationIT.TestPlugin.class);
+        plugins.add(SecondPlugin.class);
+        plugins.add(ReindexPlugin.class);
+        return plugins;
+    }
+
+    // Sorts alphabetically after the feature from FeatureMigrationIT
+    private static final String SECOND_FEATURE_NAME = "B-test-feature";
+    private static final String ORIGIN = MultiFeatureMigrationIT.class.getSimpleName();
+    private static final String VERSION_META_KEY = "version";
+    static final int SECOND_FEATURE_IDX_FLAG_VALUE = 0;
+
+    public void testMultipleFeatureMigration() throws Exception {
+        // All the indices from FeatureMigrationIT
+        createSystemIndexForDescriptor(INTERNAL_MANAGED);
+        createSystemIndexForDescriptor(INTERNAL_UNMANAGED);
+        createSystemIndexForDescriptor(EXTERNAL_MANAGED);
+        createSystemIndexForDescriptor(EXTERNAL_UNMANAGED);
+        // And our new one
+        createSystemIndexForDescriptor(SECOND_FEATURE_IDX_DESCRIPTOR);
+
+        ensureGreen();
+
+        SetOnce<Boolean> preMigrationHookCalled = new SetOnce<>();
+        SetOnce<Boolean> postMigrationHookCalled = new SetOnce<>();
+        SetOnce<Boolean> secondPluginPreMigrationHookCalled = new SetOnce<>();
+        SetOnce<Boolean> secondPluginPostMigrationHookCalled = new SetOnce<>();
+
+        TestPlugin.preMigrationHook.set(clusterState -> {
+            // None of the other hooks should have been called yet.
+            assertThat(postMigrationHookCalled.get(), nullValue());
+            assertThat(secondPluginPreMigrationHookCalled.get(), nullValue());
+            assertThat(secondPluginPostMigrationHookCalled.get(), nullValue());
+            Map<String, Object> metadata = new HashMap<>();
+            metadata.put("stringKey", "first plugin value");
+
+            // We shouldn't have any results in the cluster state given no features have finished yet.
+            FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE);
+            assertThat(currentResults, nullValue());
+
+            preMigrationHookCalled.set(true);
+            return metadata;
+        });
+
+        TestPlugin.postMigrationHook.set((clusterState, metadata) -> {
+            // Check that the hooks have been called or not as expected.
+            assertThat(preMigrationHookCalled.get(), is(true));
+            assertThat(secondPluginPreMigrationHookCalled.get(), nullValue());
+            assertThat(secondPluginPostMigrationHookCalled.get(), nullValue());
+
+            assertThat(metadata, hasEntry("stringKey", "first plugin value"));
+
+            // We shouldn't have any results in the cluster state given no features have finished yet.
+            FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE);
+            assertThat(currentResults, nullValue());
+
+            postMigrationHookCalled.set(true);
+        });
+
+        SecondPlugin.preMigrationHook.set(clusterState -> {
+            // Check that the hooks have been called or not as expected.
+            assertThat(preMigrationHookCalled.get(), is(true));
+            assertThat(postMigrationHookCalled.get(), is(true));
+            assertThat(secondPluginPostMigrationHookCalled.get(), nullValue());
+
+            Map<String, Object> metadata = new HashMap<>();
+            metadata.put("stringKey", "second plugin value");
+
+            // But now, we should have results, as we're in a new feature!
+ FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + + secondPluginPreMigrationHookCalled.set(true); + return metadata; + }); + + SecondPlugin.postMigrationHook.set((clusterState, metadata) -> { + // Check that the hooks have been called or not as expected. + assertThat(preMigrationHookCalled.get(), is(true)); + assertThat(postMigrationHookCalled.get(), is(true)); + assertThat(secondPluginPreMigrationHookCalled.get(), is(true)); + + assertThat( + metadata, + hasEntry("stringKey", "second plugin value") + ); + + // And here, the results should be the same, as we haven't updated the state with this feature's status yet. + FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + + secondPluginPostMigrationHookCalled.set(true); + }); + + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); + assertThat(migrationResponse.getReason(), nullValue()); + assertThat(migrationResponse.getElasticsearchException(), nullValue()); + final Set migratingFeatures = migrationResponse.getFeatures() + .stream() + .map(PostFeatureUpgradeResponse.Feature::getFeatureName) + .collect(Collectors.toSet()); + assertThat(migratingFeatures, hasItems(FEATURE_NAME, SECOND_FEATURE_NAME)); + + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + assertBusy(() -> { + GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest) + .get(); + logger.info(Strings.toString(statusResponse)); + assertThat(statusResponse.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); + }); + + assertTrue("the first plugin's pre-migration hook wasn't actually called", preMigrationHookCalled.get()); + assertTrue("the first plugin's post-migration hook wasn't actually called", postMigrationHookCalled.get()); + + assertTrue("the second plugin's pre-migration hook wasn't actually called", secondPluginPreMigrationHookCalled.get()); + assertTrue("the second plugin's post-migration hook wasn't actually called", secondPluginPostMigrationHookCalled.get()); + + Metadata finalMetadata = client().admin().cluster().prepareState().get().getState().metadata(); + // Check that the results metadata is what we expect + FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), 
allOf(aMapWithSize(2), hasKey(FEATURE_NAME), hasKey(SECOND_FEATURE_NAME)));
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true));
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue());
+        assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue());
+        assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).succeeded(), is(true));
+        assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).getFailedIndexName(), nullValue());
+        assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).getException(), nullValue());
+
+        // Finally, verify that all the indices exist and have the properties we expect.
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".int-man-old-reindexed-for-8",
+            INTERNAL_MANAGED_FLAG_VALUE,
+            true,
+            true,
+            Arrays.asList(".int-man-old", ".internal-managed-alias")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".int-unman-old-reindexed-for-8",
+            INTERNAL_UNMANAGED_FLAG_VALUE,
+            false,
+            true,
+            Collections.singletonList(".int-unman-old")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".ext-man-old-reindexed-for-8",
+            EXTERNAL_MANAGED_FLAG_VALUE,
+            true,
+            false,
+            Arrays.asList(".ext-man-old", ".external-managed-alias")
+        );
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".ext-unman-old-reindexed-for-8",
+            EXTERNAL_UNMANAGED_FLAG_VALUE,
+            false,
+            false,
+            Collections.singletonList(".ext-unman-old")
+        );
+
+        assertIndexHasCorrectProperties(
+            finalMetadata,
+            ".second-int-man-old-reindexed-for-8",
+            SECOND_FEATURE_IDX_FLAG_VALUE,
+            true,
+            true,
+            Arrays.asList(".second-int-man-old", ".second-internal-managed-alias")
+        );
+    }
+
+    private static final SystemIndexDescriptor SECOND_FEATURE_IDX_DESCRIPTOR = SystemIndexDescriptor.builder()
+        .setIndexPattern(".second-int-man-*")
+        .setAliasName(".second-internal-managed-alias")
+        .setPrimaryIndex(".second-int-man-old")
+        .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED)
+        .setSettings(createSimpleSettings(Version.V_7_0_0, 0))
+        .setMappings(createSimpleMapping(true, true))
+        .setOrigin(ORIGIN)
+        .setVersionMetaKey(VERSION_META_KEY)
+        .setAllowedElasticProductOrigins(Collections.emptyList())
+        .setMinimumNodeVersion(Version.V_7_0_0)
+        .setPriorSystemIndexDescriptors(Collections.emptyList())
+        .build();
+
+    public static class SecondPlugin extends Plugin implements SystemIndexPlugin {
+
+        private static final AtomicReference<Function<ClusterState, Map<String, Object>>> preMigrationHook = new AtomicReference<>();
+        private static final AtomicReference<BiConsumer<ClusterState, Map<String, Object>>> postMigrationHook = new AtomicReference<>();
+
+        public SecondPlugin() {
+
+        }
+
+        @Override
+        public String getFeatureName() {
+            return SECOND_FEATURE_NAME;
+        }
+
+        @Override
+        public String getFeatureDescription() {
+            return "a plugin for testing system index migration with multiple features";
+        }
+
+        @Override
+        public Collection<SystemIndexDescriptor> getSystemIndexDescriptors(Settings settings) {
+            return Collections.singletonList(SECOND_FEATURE_IDX_DESCRIPTOR);
+        }
+
+        @Override
+        public void prepareForIndicesMigration(ClusterService clusterService, Client client, ActionListener<Map<String, Object>> listener) {
+            listener.onResponse(preMigrationHook.get().apply(clusterService.state()));
+        }
+
+        @Override
+        public void indicesMigrationComplete(
+            Map<String, Object> preUpgradeMetadata,
+            ClusterService clusterService,
+            Client client,
+            ActionListener<Boolean> listener
+        ) {
+            postMigrationHook.get().accept(clusterService.state(), preUpgradeMetadata);
+            listener.onResponse(true);
+        }
+    }
+}
diff --git
a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..07c46ac055ea9 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +7d029029ce95599bd791a07fb8c9e2f00865d7a5 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 8c6f53bfae64d..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -91d7bb3e0f54577efbd08f8527ab705f76b405be \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..742b6f43722aa --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +aaae0ea657ebedc350402dd3344e56e33c79724d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index dc92541178560..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2e37f30126a2ddc0231b929b82b2bc2e120af1c2 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..767290ec95e3b --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +f2499fe88398e457a801cfd393db90f12b2ff149 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 0a00260a3592c..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -efae6bf6515c2d767491b85a50946279f66e9836 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..1742ae6d503cf --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +49e196efaf31e16aadbf8a630df705929b7778c9 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index cf21023f7195d..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-e50a65b19d22bc98ca57a9426c45138c71787154 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..1565583d925aa --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +d76ed543a88428e9c78b30b91050768acb26c7a7 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 4bfee0a4dabe1..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6de256a78d4c8838d70f4b3720ba9fb6242a20bf \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..bab205f782a05 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +039d1d5ba495418bb0079083a17ed3345948a8aa \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 2c7e1a52098af..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48030e67171008e5d76fb211091a2bd4d256f097 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-2719cf6630e.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..bfbc0e77505f9 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +788516cec7f01c6e6c4bbb5df2eb1b9963312827 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 2d8b0dc4dd2a5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21a775f1b3a2912c02ac64ceb23746b259694938 \ No newline at end of file diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java index 7a73db5d37081..bf7c363d94ae7 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -14,7 +14,10 @@ import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.BasicSessionCredentials; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; + +import org.elasticsearch.common.logging.DeprecationLogger; 
 import org.elasticsearch.common.settings.MockSecureSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsException;
 import org.elasticsearch.test.ESTestCase;
@@ -59,8 +62,9 @@ public void testDeprecationOfLoneAccessKey() {
             Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is("aws_key"));
         assertThat(credentials.getAWSSecretKey(), is(""));
-        assertSettingDeprecationsAndWarnings(new String[]{},
-            "Setting [discovery.ec2.access_key] is set but [discovery.ec2.secret_key] is not, which will be unsupported in future");
+        assertSettingDeprecationsAndWarnings(new Setting<?>[]{},
+            new DeprecationWarning(DeprecationLogger.CRITICAL, "Setting [discovery.ec2.access_key] is set but " +
+                "[discovery.ec2.secret_key] is not, which will be unsupported in future"));
     }
 
     public void testDeprecationOfLoneSecretKey() {
@@ -70,8 +74,9 @@ public void testDeprecationOfLoneSecretKey() {
             Ec2ClientSettings.getClientSettings(Settings.builder().setSecureSettings(secureSettings).build())).getCredentials();
         assertThat(credentials.getAWSAccessKeyId(), is(""));
         assertThat(credentials.getAWSSecretKey(), is("aws_secret"));
-        assertSettingDeprecationsAndWarnings(new String[]{},
-            "Setting [discovery.ec2.secret_key] is set but [discovery.ec2.access_key] is not, which will be unsupported in future");
+        assertSettingDeprecationsAndWarnings(new Setting<?>[]{},
+            new DeprecationWarning(DeprecationLogger.CRITICAL, "Setting [discovery.ec2.secret_key] is set but " +
+                "[discovery.ec2.access_key] is not, which will be unsupported in future"));
     }
 
     public void testRejectionOfLoneSessionToken() {
diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java
index f071407005f98..22857331f896c 100644
--- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java
+++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java
@@ -44,11 +44,13 @@ public final class AttachmentProcessor extends AbstractProcessor {
     private final Set<Property> properties;
     private final int indexedChars;
     private final boolean ignoreMissing;
+    private final boolean removeBinary;
     private final String indexedCharsField;
     private final String resourceName;
 
     AttachmentProcessor(String tag, String description, String field, String targetField, Set<Property> properties,
-                        int indexedChars, boolean ignoreMissing, String indexedCharsField, String resourceName) {
+                        int indexedChars, boolean ignoreMissing, String indexedCharsField, String resourceName,
+                        boolean removeBinary) {
         super(tag, description);
         this.field = field;
         this.targetField = targetField;
@@ -57,12 +59,18 @@ public final class AttachmentProcessor extends AbstractProcessor {
         this.ignoreMissing = ignoreMissing;
         this.indexedCharsField = indexedCharsField;
         this.resourceName = resourceName;
+        this.removeBinary = removeBinary;
     }
 
     boolean isIgnoreMissing() {
         return ignoreMissing;
     }
 
+    // For tests only
+    boolean isRemoveBinary() {
+        return removeBinary;
+    }
+
     @Override
     public IngestDocument execute(IngestDocument ingestDocument) {
         Map<String, Object> additionalFields = new HashMap<>();
@@ -162,6 +170,10 @@ public IngestDocument execute(IngestDocument ingestDocument) {
         }
 
         ingestDocument.setFieldValue(targetField, additionalFields);
+
+        if (removeBinary) {
+            ingestDocument.removeField(field);
+        }
         return ingestDocument;
     }
 
@@ -200,6 +212,7 @@ public AttachmentProcessor create(Map registry, Strin
         int indexedChars = readIntProperty(TYPE, processorTag, config, "indexed_chars", NUMBER_OF_CHARS_INDEXED);
         boolean ignoreMissing = readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false);
         String indexedCharsField = readOptionalStringProperty(TYPE, processorTag, config, "indexed_chars_field");
+        boolean removeBinary = readBooleanProperty(TYPE, processorTag, config, "remove_binary", false);
 
         final Set<Property> properties;
         if (propertyNames != null) {
@@ -217,7 +230,7 @@
         }
 
         return new AttachmentProcessor(processorTag, description, field, targetField, properties, indexedChars, ignoreMissing,
-            indexedCharsField, resourceName);
+            indexedCharsField, resourceName, removeBinary);
     }
 }
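For orientation before the test changes: remove_binary is the new trailing constructor argument, and when true the processor drops the original binary source field after parsing. A small sketch of its effect, using the package-private constructor shape from this diff the way the tests do (tag and field names are arbitrary; assumed to run inside a test method that declares throws Exception with ingestDocument in scope):

    import java.util.EnumSet;

    // remove_binary is the last constructor argument; with true, the processor
    // deletes "source_field" after extraction so only the parsed attachment
    // data remains on the ingested document.
    AttachmentProcessor processor = new AttachmentProcessor(
        "tag",          // processor tag (arbitrary)
        null,           // description
        "source_field", // field holding the base64-encoded binary
        "target_field", // where the extracted attachment fields land
        EnumSet.allOf(AttachmentProcessor.Property.class),
        10000,          // indexed_chars
        false,          // ignore_missing
        null,           // indexed_chars_field
        null,           // resource_name
        true            // remove_binary
    );
    processor.execute(ingestDocument);
    assert ingestDocument.hasField("source_field") == false;
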
diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java
index d0288fdc4d75f..45c3407020dcc 100644
--- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java
+++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorFactoryTests.java
@@ -124,4 +124,19 @@ public void testIgnoreMissing() throws Exception {
         assertThat(processor.getProperties(), sameInstance(AttachmentProcessor.Factory.DEFAULT_PROPERTIES));
         assertTrue(processor.isIgnoreMissing());
     }
+
+    public void testRemoveBinary() throws Exception {
+        Map<String, Object> config = new HashMap<>();
+        config.put("field", "_field");
+        config.put("remove_binary", true);
+
+        String processorTag = randomAlphaOfLength(10);
+
+        AttachmentProcessor processor = factory.create(null, processorTag, null, config);
+        assertThat(processor.getTag(), equalTo(processorTag));
+        assertThat(processor.getField(), equalTo("_field"));
+        assertThat(processor.getTargetField(), equalTo("attachment"));
+        assertThat(processor.getProperties(), sameInstance(AttachmentProcessor.Factory.DEFAULT_PROPERTIES));
+        assertTrue(processor.isRemoveBinary());
+    }
 }
diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java
index 4291a54a9149b..63197de7f7981 100644
--- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java
+++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java
@@ -44,7 +44,7 @@ public class AttachmentProcessorTests extends ESTestCase {
     @Before
     public void createStandardProcessor() {
         processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false, null, null);
+            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false, null, null, false);
     }
 
     public void testEnglishTextDocument() throws Exception {
@@ -77,7 +77,7 @@ public void testHtmlDocumentWithRandomFields() throws Exception {
             selectedProperties.add(AttachmentProcessor.Property.DATE);
         }
         processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "target_field", selectedProperties, 10000, false, null, null);
+            "target_field", selectedProperties, 10000, false, null, null, false);
 
         Map<String, Object> attachmentData = parseDocument("htmlWithEmptyDateMeta.html", processor);
         assertThat(attachmentData.keySet(), hasSize(selectedFieldNames.length));
@@ -237,7 +237,7 @@ public void testNullValueWithIgnoreMissing() throws Exception {
             Collections.singletonMap("source_field", null));
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "randomTarget", null, 10, true, null, null);
+            "randomTarget", null, 10, true, null, null, false);
         processor.execute(ingestDocument);
         assertIngestDocument(originalIngestDocument, ingestDocument);
     }
@@ -246,7 +246,7 @@ public void testNonExistentWithIgnoreMissing() throws Exception {
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "randomTarget", null, 10, true, null, null);
+            "randomTarget", null, 10, true, null, null, false);
         processor.execute(ingestDocument);
         assertIngestDocument(originalIngestDocument, ingestDocument);
     }
@@ -256,7 +256,7 @@ public void testNullWithoutIgnoreMissing() throws Exception {
             Collections.singletonMap("source_field", null));
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "randomTarget", null, 10, false, null, null);
+            "randomTarget", null, 10, false, null, null, false);
         Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
         assertThat(exception.getMessage(), equalTo("field [source_field] is null, cannot parse."));
     }
@@ -265,7 +265,7 @@ public void testNonExistentWithoutIgnoreMissing() throws Exception {
         IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap());
         IngestDocument ingestDocument = new IngestDocument(originalIngestDocument);
         Processor processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "randomTarget", null, 10, false, null, null);
+            "randomTarget", null, 10, false, null, null, false);
         Exception exception = expectThrows(Exception.class, () -> processor.execute(ingestDocument));
         assertThat(exception.getMessage(), equalTo("field [source_field] not present as part of path [source_field]"));
     }
@@ -299,7 +299,7 @@ private Map parseDocument(String file, AttachmentProcessor proce
     public void testIndexedChars() throws Exception {
         processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, null, null);
+            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, null, null, false);
 
         Map<String, Object> attachmentData = parseDocument("text-in-english.txt", processor);
@@ -310,7 +310,7 @@ public void testIndexedChars() throws Exception {
         assertThat(attachmentData.get("content_length"), is(19L));
 
         processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
-            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, "max_length", null);
+            "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 19, false, "max_length", null, false);
 
         attachmentData = parseDocument("text-in-english.txt", processor);
@@ -341,7 +341,7 @@ public void testIndexedChars() throws Exception {
     public void testIndexedCharsWithResourceName() throws Exception {
         processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
             "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 100,
-            false, null, "resource_name");
+            false, null, "resource_name", false);
 
         Map<String, Object> attachmentData = parseDocument("text-cjk-big5.txt", processor,
             Collections.singletonMap("max_length", 100), true);
@@ -369,6 +369,27 @@ public void testIndexedCharsWithResourceName() throws Exception {
         assertThat(attachmentData.get("content_length"), is(100L));
     }
 
+    public void testRemoveBinary() throws Exception {
+        {
+            // Test the default behavior.
+            Map<String, Object> document = new HashMap<>();
+            document.put("source_field", getAsBinaryOrBase64("text-in-english.txt"));
+            IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+            processor.execute(ingestDocument);
+            assertThat(ingestDocument.hasField("source_field"), is(true));
+        }
+        {
+            // Remove the binary field.
+            processor = new AttachmentProcessor(randomAlphaOfLength(10), null, "source_field",
+                "target_field", EnumSet.allOf(AttachmentProcessor.Property.class), 10000, false, null, null, true);
+            Map<String, Object> document = new HashMap<>();
+            document.put("source_field", getAsBinaryOrBase64("text-in-english.txt"));
+            IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document);
+            processor.execute(ingestDocument);
+            assertThat(ingestDocument.hasField("source_field"), is(false));
+        }
+    }
+
     private Object getAsBinaryOrBase64(String filename) throws Exception {
         String path = "/org/elasticsearch/ingest/attachment/test/sample-files/" + filename;
         try (InputStream is = AttachmentProcessorTests.class.getResourceAsStream(path)) {
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
index 8ac33a98e413d..562e974a875d1 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.repositories.azure;
 
 import com.azure.core.util.serializer.JacksonAdapter;
+
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -16,10 +17,8 @@
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.settings.SettingsException;
-import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.indices.recovery.RecoverySettings;
@@ -33,6 +32,7 @@
 import org.elasticsearch.threadpool.ScalingExecutorBuilder;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
+import org.elasticsearch.xcontent.NamedXContentRegistry;
 
 import java.util.Arrays;
 import java.util.Collection;
@@ -132,9 +132,6 @@ public static ExecutorBuilder nettyEventLoopExecutorBuilder(Settings settings
     public void reload(Settings settings) {
         // secure
settings should be readable final Map clientsSettings = AzureStorageSettings.load(settings); - if (clientsSettings.isEmpty()) { - throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); - } AzureStorageService storageService = azureStoreService.get(); assert storageService != null; storageService.refreshSettings(clientsSettings); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 7a5bc51564c88..1d06ac19881d2 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.azure; import com.azure.storage.common.policy.RequestRetryOptions; + import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; @@ -163,14 +164,14 @@ public void testReinitClientEmptySettings() throws IOException { final AzureStorageService azureStorageService = plugin.azureStoreService.get(); AzureBlobServiceClient client11 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); - // reinit with empty settings - final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reload(Settings.EMPTY)); - assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); + // reinit with empty settings is okay + plugin.reload(Settings.EMPTY); // existing client untouched assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); - // new client also untouched - AzureBlobServiceClient client21 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); - assertThat(client21.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); + // client is no longer registered + final SettingsException e = + expectThrows(SettingsException.class, () -> azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY)); + assertThat(e.getMessage(), equalTo("Unable to find client with name [azure1]")); } } diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 11deffb908528..3f8ceb3290e60 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -156,7 +156,8 @@ public void testEnforcedCooldownPeriod() throws IOException { SnapshotState.SUCCESS, SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION.minimumCompatibilityVersion(), 0L, // -1 would refresh RepositoryData and find the real version - 0L // -1 would refresh RepositoryData and find the real version + 0L, // -1 would refresh RepositoryData and find the real version, + "" // null would refresh RepositoryData and find the real version ))); final BytesReference serialized = 
BytesReference.bytes(modifiedRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), SnapshotsService.OLD_SNAPSHOT_FORMAT)); diff --git a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java index 72d86df63beaf..c748bee16c0e2 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java +++ b/qa/ccs-rolling-upgrade-remote-cluster/src/test/java/org/elasticsearch/upgrades/SearchStatesIT.java @@ -68,6 +68,7 @@ * This test ensure that we keep the search states of a CCS request correctly when the local and remote clusters * have different but compatible versions. See SearchService#createAndPutReaderContext */ +@SuppressWarnings("removal") public class SearchStatesIT extends ESRestTestCase { private static final Logger LOGGER = LogManager.getLogger(SearchStatesIT.class); @@ -198,7 +199,7 @@ static int indexDocs(RestHighLevelClient client, String index, int numDocs) thro return numDocs; } - void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int remoteNumDocs) { + void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int remoteNumDocs, Integer preFilterShardSize) { try (RestHighLevelClient localClient = newLocalClient()) { Request request = new Request("POST", "/_search"); final int expectedDocs; @@ -212,6 +213,12 @@ void verifySearch(String localIndex, int localNumDocs, String remoteIndex, int r if (UPGRADE_FROM_VERSION.onOrAfter(Version.V_7_0_0)) { request.addParameter("ccs_minimize_roundtrips", Boolean.toString(randomBoolean())); } + if (preFilterShardSize == null && randomBoolean()) { + preFilterShardSize = randomIntBetween(1, 100); + } + if (preFilterShardSize != null) { + request.addParameter("pre_filter_shard_size", Integer.toString(preFilterShardSize)); + } int size = between(1, 100); request.setJsonEntity("{\"sort\": \"f\", \"size\": " + size + "}"); Response response = localClient.getLowLevelClient().performRequest(request); @@ -245,7 +252,32 @@ public void testBWCSearchStates() throws Exception { configureRemoteClusters(getNodes(remoteClient.getLowLevelClient())); int iterations = between(1, 20); for (int i = 0; i < iterations; i++) { - verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs); + verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, null); + } + localClient.indices().delete(new DeleteIndexRequest(localIndex), RequestOptions.DEFAULT); + remoteClient.indices().delete(new DeleteIndexRequest(remoteIndex), RequestOptions.DEFAULT); + } + } + + public void testCanMatch() throws Exception { + String localIndex = "test_can_match_local_index"; + String remoteIndex = "test_can_match_remote_index"; + try (RestHighLevelClient localClient = newLocalClient(); + RestHighLevelClient remoteClient = newRemoteClient()) { + localClient.indices().create(new CreateIndexRequest(localIndex) + .settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20))), + RequestOptions.DEFAULT); + int localNumDocs = indexDocs(localClient, localIndex, between(10, 100)); + + remoteClient.indices().create(new CreateIndexRequest(remoteIndex) + .settings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(5, 20))), + RequestOptions.DEFAULT); + int remoteNumDocs = indexDocs(remoteClient, remoteIndex, between(10, 100)); + + 
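
The `preFilterShardSize` argument threaded through `verifySearch` maps onto the ordinary `pre_filter_shard_size` query-string parameter of `_search`, which controls the shard-count threshold at which the can-match pre-filter phase runs. A sketch of setting it directly, assuming the same low-level client helpers; the index name and values are illustrative:

    Request search = new Request("POST", "/test_index/_search");
    search.addParameter("pre_filter_shard_size", "1"); // a low threshold forces the can-match phase
    search.setJsonEntity("{\"sort\": \"f\", \"size\": 10}");
    Response response = client().performRequest(search);
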
configureRemoteClusters(getNodes(remoteClient.getLowLevelClient())); + int iterations = between(1, 10); + for (int i = 0; i < iterations; i++) { + verifySearch(localIndex, localNumDocs, CLUSTER_ALIAS + ":" + remoteIndex, remoteNumDocs, between(1, 10)); } localClient.indices().delete(new DeleteIndexRequest(localIndex), RequestOptions.DEFAULT); remoteClient.indices().delete(new DeleteIndexRequest(remoteIndex), RequestOptions.DEFAULT); diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index d391d81860369..33808a7edb6d0 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -62,6 +62,7 @@ import static org.hamcrest.CoreMatchers.containsString; +@SuppressWarnings("removal") public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { private static RestHighLevelClient restHighLevelClient; diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 6ae77b9ba4ac5..440f3f619d7cb 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -23,6 +23,9 @@ BuildParams.bwcVersions.withIndexCompatiple { bwcVersion, baseName -> setting 'indices.memory.shard_inactive_time', '60m' setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" setting 'xpack.security.enabled', 'false' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.index_mode_feature_flag_registered', 'true' + } } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java index 1c0cf3f973c9b..d15edec2698f9 100644 --- a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -100,7 +100,7 @@ public void testDeprecationWarnMessage() throws IOException { ); } - assertWarnings("deprecated warn message1"); + assertWarnings(true, new DeprecationWarning(Level.WARN, "deprecated warn message1")) ; } public void testDeprecatedMessageWithoutXOpaqueId() throws IOException { diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java new file mode 100644 index 0000000000000..9c050bb4d69a1 --- /dev/null +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/HotThreadsIT.java @@ -0,0 +1,40 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.backwards; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.rest.ESRestTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class HotThreadsIT extends ESRestTestCase { + + public void testHotThreads() throws Exception { + final IndexingIT.Nodes nodes = IndexingIT.buildNodeAndVersions(client()); + assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); + assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); + assumeTrue("new nodes are higher version than BWC nodes", + nodes.getNewNodes().get(0).getVersion().compareTo(nodes.getBWCNodes().get(0).getVersion()) > 0); + final Request request = new Request("GET", "/_nodes/hot_threads"); + final Response response = client().performRequest(request); + final String responseString = EntityUtils.toString(response.getEntity()); + final String[] nodeResponses = responseString.split("::: "); + int respondedNodes = 0; + for (String nodeResponse : nodeResponses) { + final String[] lines = nodeResponse.split("\n"); + final String nodeId = lines[0].trim(); + if (nodeId.isEmpty() == false) { + respondedNodes++; + } + } + assertThat(respondedNodes, equalTo(nodes.getNewNodes().size() + nodes.getBWCNodes().size())); + } +} diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index c2b9dbdb44462..dc764b5f20dd3 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -77,6 +76,7 @@ import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; import org.junit.AfterClass; import org.junit.Before; @@ -103,6 +103,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; /** @@ -114,6 +115,7 @@ * such parameter, hence we want to verify that results are the same in both scenarios. */ @TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs +@SuppressWarnings("removal") public class CCSDuelIT extends ESRestTestCase { private static final String INDEX_NAME = "ccs_duel_index"; @@ -438,7 +440,6 @@ public void testSortByFieldOneClusterHasNoResults() throws Exception { assumeMultiClusterSetup(); SearchRequest searchRequest = initSearchRequest(); // set to a value greater than the number of shards to avoid differences due to the skipping of shards - searchRequest.setPreFilterShardSize(128); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); boolean onlyRemote = randomBoolean(); sourceBuilder.query(new TermQueryBuilder("_index", onlyRemote ? 
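
The `HotThreadsIT` test above only verifies that every node answers; for reference, the new sampling options this change also registers in the rest-api-spec further down (the `mem` type and the `sort` parameter for `cpu` sampling) would be exercised roughly like this, assuming a plain low-level client:

    Request hotThreads = new Request("GET", "/_nodes/hot_threads");
    hotThreads.addParameter("type", "cpu");
    hotThreads.addParameter("sort", "cpu"); // new: sort order for the 'cpu' type (default: total)
    // hotThreads.addParameter("type", "mem"); // alternatively, the new 'mem' sample type
    String body = EntityUtils.toString(client().performRequest(hotThreads).getEntity());
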
REMOTE_INDEX_NAME : INDEX_NAME)); @@ -724,7 +725,11 @@ private static void assumeMultiClusterSetup() { private static SearchRequest initSearchRequest() { List indices = Arrays.asList(INDEX_NAME, "my_remote_cluster:" + INDEX_NAME); Collections.shuffle(indices, random()); - return new SearchRequest(indices.toArray(new String[0])); + final SearchRequest request = new SearchRequest(indices.toArray(new String[0])); + if (randomBoolean()) { + request.setPreFilterShardSize(between(1, 20)); + } + return request; } private static void duelSearch(SearchRequest searchRequest, Consumer responseChecker) throws Exception { @@ -766,6 +771,7 @@ private static void duelSearch(SearchRequest searchRequest, Consumer responseToMap(SearchResponse response) throws shard.remove("fetch"); } } + Map shards = (Map)responseMap.get("_shards"); + if (shards != null) { + shards.remove("skipped"); + } return responseMap; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java index c5a7694c65d54..a5f7f6fa66649 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/CertGenCliTests.java @@ -132,7 +132,7 @@ public void test40RunWithCert() throws Exception { } private String setElasticPassword() { - Shell.Result result = installation.executables().resetElasticPasswordTool.run("--auto --batch --silent", null); + Shell.Result result = installation.executables().resetPasswordTool.run("--auto --batch --silent --username elastic", null); return result.stdout; } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java new file mode 100644 index 0000000000000..a54e53557b251 --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/EnrollmentProcessTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.packaging.test; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.packaging.util.Archives; +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.Shell; +import org.junit.BeforeClass; + +import static org.elasticsearch.packaging.util.Archives.installArchive; +import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation; +import static org.elasticsearch.packaging.util.FileUtils.getCurrentVersion; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.junit.Assume.assumeTrue; + +public class EnrollmentProcessTests extends PackagingTestCase { + + @BeforeClass + public static void filterDistros() { + assumeTrue("only archives", distribution.isArchive()); + } + + public void test10AutoFormCluster() throws Exception { + /* Windows issue awaits fix: https://github.com/elastic/elasticsearch/issues/49340 */ + assumeTrue("expect command isn't on Windows", distribution.platform != Distribution.Platform.WINDOWS); + installation = installArchive(sh, distribution(), getRootTempDir().resolve("elasticsearch-node1"), getCurrentVersion(), true); + verifyArchiveInstallation(installation, distribution()); + setFileSuperuser("test_superuser", "test_superuser_password"); + sh.getEnv().put("ES_JAVA_OPTS", "-Xms1g -Xmx1g"); + Shell.Result startFirstNode = awaitElasticsearchStartupWithResult( + Archives.startElasticsearchWithTty(installation, sh, null, false) + ); + assertThat(startFirstNode.isSuccess(), is(true)); + // Verify that the first node was auto-configured for security + verifySecurityAutoConfigured(installation); + // Generate a node enrollment token to be subsequently used by the second node + Shell.Result createTokenResult = installation.executables().createEnrollmentToken.run("-s node"); + assertThat(Strings.isNullOrEmpty(createTokenResult.stdout), is(false)); + final String enrollmentToken = createTokenResult.stdout; + // installation now points to the second node + installation = installArchive(sh, distribution(), getRootTempDir().resolve("elasticsearch-node2"), getCurrentVersion(), true); + // auto-configure security using the enrollment token + installation.executables().enrollToExistingCluster.run("--enrollment-token " + enrollmentToken); + // Verify that the second node was also configured (via enrollment) for security + verifySecurityAutoConfigured(installation); + Shell.Result startSecondNode = awaitElasticsearchStartupWithResult( + Archives.startElasticsearchWithTty(installation, sh, null, false) + ); + assertThat(startSecondNode.isSuccess(), is(true)); + // verify that the two nodes formed a cluster + assertThat(makeRequest("https://localhost:9200/_cluster/health"), containsString("\"number_of_nodes\":2")); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java index 1affcd7646f96..72f481caf9cc3 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagesSecurityAutoConfigurationTests.java @@ -96,21 +96,21 @@ public void test40SecurityNotAutoConfiguredWhenExistingKeystoreUnknownPassword() } private Predicate successfulAutoConfiguration() { - Predicate p1 = output -> output.contains("Authentication and Authorization are enabled."); - Predicate p2 = 
output -> output.contains("TLS for the transport and the http layers is enabled and configured."); - Predicate p3 = output -> output.contains("The password of the elastic superuser will be set to:"); + Predicate p1 = output -> output.contains("Authentication and authorization are enabled."); + Predicate p2 = output -> output.contains("TLS for the transport and HTTP layers is enabled and configured."); + Predicate p3 = output -> output.contains("The generated password for the elastic built-in superuser is :"); return p1.and(p2).and(p3); } private Predicate existingSecurityConfiguration() { - return output -> output.contains("Security features appear to be already configured."); + return output -> output.contains("Skipping auto-configuration because security features appear to be already configured."); } private Predicate errorOutput() { Predicate p1 = output -> output.contains("Failed to auto-configure security features."); - Predicate p2 = output -> output.contains("Authentication and Authorization are enabled."); - Predicate p3 = output -> output.contains("You can use elasticsearch-reset-elastic-password to set a password"); - Predicate p4 = output -> output.contains("for the elastic user."); + Predicate p2 = output -> output.contains("However, authentication and authorization are still enabled."); + Predicate p3 = output -> output.contains("You can reset the password of the elastic built-in superuser with "); + Predicate p4 = output -> output.contains("'/usr/share/bin/elasticsearch-reset-password -u elastic' at any time."); return p1.and(p2).and(p3).and(p4); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index c3c228ae7ec2b..b0a2c83d45fb8 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -385,23 +385,11 @@ public void awaitElasticsearchStartup(Shell.Result result) throws Exception { } /** - * Call {@link PackagingTestCase#awaitElasticsearchStartup} and return a reference to the Shell.Result from - * starting elasticsearch + * Call {@link PackagingTestCase#awaitElasticsearchStartup} + * returning the result. */ public Shell.Result awaitElasticsearchStartupWithResult(Shell.Result result) throws Exception { - awaitElasticsearchStartupWithResult(result, 0); - return result; - } - - /** - * Call {@link PackagingTestCase#awaitElasticsearchStartup} but wait {@code additionalDelay} milliseconds more before - * returning the result. 
Useful in order to capture more from the stdout after ES has has successfully started - */ - public Shell.Result awaitElasticsearchStartupWithResult(Shell.Result result, int additionalDelay) throws Exception { awaitElasticsearchStartup(result); - if (additionalDelay > 0) { - Thread.sleep(additionalDelay); - } return result; } @@ -756,7 +744,7 @@ public static Optional getAutoConfigDirName(Installation es) { lsResult = sh.run("find \"" + es.config + "\" -type d -maxdepth 1"); } assertNotNull(lsResult.stdout); - return Arrays.stream(lsResult.stdout.split("\n")).filter(f -> f.contains("tls_auto_config_initial_node_")).findFirst(); + return Arrays.stream(lsResult.stdout.split("\n")).filter(f -> f.contains("tls_auto_config_")).findFirst(); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java index 91a77dabcf821..ebdd13d07fdd0 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Archives.java @@ -36,9 +36,9 @@ import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Platforms.isDPKG; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.emptyOrNullString; -import static org.hamcrest.collection.IsCollectionWithSize.hasSize; -import static org.hamcrest.collection.IsEmptyCollection.empty; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; @@ -54,19 +54,27 @@ public class Archives { /** This is an arbitrarily chosen value that gives Elasticsearch time to log Bootstrap * errors to the console if they occur before the logging framework is initialized. 
*/ - public static final String ES_STARTUP_SLEEP_TIME_SECONDS = "15"; + public static final String ES_STARTUP_SLEEP_TIME_SECONDS = "25"; public static Installation installArchive(Shell sh, Distribution distribution) throws Exception { - return installArchive(sh, distribution, getDefaultArchiveInstallPath(), getCurrentVersion()); + return installArchive(sh, distribution, getDefaultArchiveInstallPath(), getCurrentVersion(), false); } - public static Installation installArchive(Shell sh, Distribution distribution, Path fullInstallPath, String version) throws Exception { + public static Installation installArchive( + Shell sh, + Distribution distribution, + Path fullInstallPath, + String version, + boolean allowMultiple + ) throws Exception { final Path distributionFile = getDistributionFile(distribution); final Path baseInstallPath = fullInstallPath.getParent(); final Path extractedPath = baseInstallPath.resolve("elasticsearch-" + version); assertThat("distribution file must exist: " + distributionFile.toString(), Files.exists(distributionFile), is(true)); - assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty()); + if (allowMultiple == false) { + assertThat("elasticsearch must not already be installed", lsGlob(baseInstallPath, "elasticsearch*"), empty()); + } logger.info("Installing file: " + distributionFile); final String installCommand; @@ -97,9 +105,11 @@ public static Installation installArchive(Shell sh, Distribution distribution, P mv(extractedPath, fullInstallPath); assertThat("extracted archive moved to install location", Files.exists(fullInstallPath)); - final List installations = lsGlob(baseInstallPath, "elasticsearch*"); - assertThat("only the intended installation exists", installations, hasSize(1)); - assertThat("only the intended installation exists", installations.get(0), is(fullInstallPath)); + if (allowMultiple == false) { + final List installations = lsGlob(baseInstallPath, "elasticsearch*"); + assertThat("only the intended installation exists", installations, hasSize(1)); + assertThat("only the intended installation exists", installations.get(0), is(fullInstallPath)); + } Platforms.onLinux(() -> setupArchiveUsersLinux(fullInstallPath)); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java index ee6b4995a218d..8735cbee27fc3 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Installation.java @@ -199,7 +199,9 @@ public class Executables { public final Executable shardTool = new Executable("elasticsearch-shard"); public final Executable nodeTool = new Executable("elasticsearch-node"); public final Executable setupPasswordsTool = new Executable("elasticsearch-setup-passwords"); - public final Executable resetElasticPasswordTool = new Executable("elasticsearch-reset-elastic-password"); + public final Executable resetPasswordTool = new Executable("elasticsearch-reset-password"); + public final Executable createEnrollmentToken = new Executable("elasticsearch-create-enrollment-token"); + public final Executable enrollToExistingCluster = new Executable("elasticsearch-enroll-node"); public final Executable sqlCli = new Executable("elasticsearch-sql-cli"); public final Executable syskeygenTool = new Executable("elasticsearch-syskeygen"); public final Executable usersTool = new Executable("elasticsearch-users"); diff --git 
a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 39bea196c9d8a..e9cbf24034305 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -110,8 +110,8 @@ public static Installation installPackage(Shell sh, Distribution distribution, @ private static String captureElasticPasswordFromOutput(Result result) { return Arrays.stream(result.stdout.split(System.lineSeparator())) - .filter(l -> l.contains("The password of the elastic superuser will be set to:")) - .map(l -> l.substring(56, 76)) + .filter(l -> l.contains("The generated password for the elastic built-in superuser is : ")) + .map(l -> l.substring(63, 83)) .findFirst() .orElse(null); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java index ed8c03fd044a3..e8da74f29bacb 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/ServerUtils.java @@ -198,7 +198,7 @@ public static Path getCaCert(Path configPath) throws IOException { } if (enrollmentEnabled && httpSslEnabled) { assert Files.exists(caCert) == false; - List allAutoconfTLS = FileUtils.lsGlob(configPath, "tls_auto_config_initial_node_*"); + List allAutoconfTLS = FileUtils.lsGlob(configPath, "tls_auto_config_*"); assertThat(allAutoconfTLS.size(), is(1)); Path autoconfTLSDir = allAutoconfTLS.get(0); caCert = autoconfTLSDir.resolve("http_ca.crt"); diff --git a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java index 7d5bac02394e4..eeff4a42a8616 100644 --- a/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java +++ b/qa/remote-clusters/src/test/java/org/elasticsearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java @@ -29,6 +29,7 @@ import java.nio.file.Path; import java.util.Collections; +@SuppressWarnings("removal") public abstract class AbstractMultiClusterRemoteTestCase extends ESRestTestCase { private static final String USER = "x_pack_rest_user"; diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 4f71acf1bc562..d52cf409094af 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -54,6 +54,7 @@ *
<li>Run against the current version cluster from the second step: {@link TestStep#STEP4_NEW_CLUSTER}</li>
* </ul> */ +@SuppressWarnings("removal") public class MultiVersionRepositoryAccessIT extends ESRestTestCase { private enum TestStep { diff --git a/qa/repository-old-versions/build.gradle new file mode 100644 index 0000000000000..bfdb7082586ef --- /dev/null +++ b/qa/repository-old-versions/build.gradle @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + + +import org.apache.tools.ant.taskdefs.condition.Os +import org.elasticsearch.gradle.Architecture +import org.elasticsearch.gradle.OS +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.internal.test.AntFixture +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask + +apply plugin: 'elasticsearch.jdk-download' +apply plugin: 'elasticsearch.internal-testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' + +configurations { + oldesFixture +} + +dependencies { + oldesFixture project(':test:fixtures:old-elasticsearch') + testImplementation project(':client:rest-high-level') +} + +jdks { + legacy { + vendor = 'adoptium' + version = '8u302+b08' + platform = OS.current().name().toLowerCase() + architecture = Architecture.current().name().toLowerCase() + } +} + +if (Os.isFamily(Os.FAMILY_WINDOWS)) { + logger.warn("Disabling repository-old-versions tests because we can't get the pid file on windows") +} else { + /* Set up tasks to unzip and run the old versions of ES before running the integration tests. 
+ * To avoid testing against too many old versions, always pick first and last version per major + */ + for (String versionString : ['5.0.0', '5.6.16', '6.0.0', '6.8.20']) { + Version version = Version.fromString(versionString) + String packageName = 'org.elasticsearch.distribution.zip' + String artifact = "${packageName}:elasticsearch:${version}@zip" + String versionNoDots = version.toString().replace('.', '_') + String configName = "es${versionNoDots}" + + configurations.create(configName) + + dependencies.add(configName, artifact) + + // TODO Rene: we should be able to replace these unzip tasks with gradle artifact transforms + TaskProvider unzip = tasks.register("unzipEs${versionNoDots}", Sync) { + Configuration oldEsDependency = configurations[configName] + dependsOn oldEsDependency + /* Use a closure here to delay resolution of the dependency until we need + * it */ + from { + oldEsDependency.collect { zipTree(it) } + } + into temporaryDir + } + + String repoLocation = "${buildDir}/cluster/shared/repo/${versionNoDots}" + + String clusterName = versionNoDots + + def testClusterProvider = testClusters.register(clusterName) { + setting 'path.repo', repoLocation + setting 'xpack.security.enabled', 'false' + } + + TaskProvider fixture = tasks.register("oldES${versionNoDots}Fixture", AntFixture) { + dependsOn project.configurations.oldesFixture, jdks.legacy + dependsOn unzip + executable = "${BuildParams.runtimeJavaHome}/bin/java" + env 'CLASSPATH', "${-> project.configurations.oldesFixture.asPath}" + // old versions of Elasticsearch need JAVA_HOME + env 'JAVA_HOME', jdks.legacy.javaHomePath + // If we are running on certain arm systems we need to explicitly set the stack size to overcome JDK page size bug + if (Architecture.current() == Architecture.AARCH64) { + env 'ES_JAVA_OPTS', '-Xss512k' + } + args 'oldes.OldElasticsearch', + baseDir, + unzip.get().temporaryDir, + false, + "path.repo: ${repoLocation}" + maxWaitInSeconds 60 + waitCondition = { fixture, ant -> + // the fixture writes the ports file when Elasticsearch's HTTP service + // is ready, so we can just wait for the file to exist + return fixture.portsFile.exists() + } + } + + tasks.register("javaRestTest#${versionNoDots}", StandaloneRestIntegTestTask) { + useCluster testClusterProvider + dependsOn fixture + doFirst { + delete(repoLocation) + mkdir(repoLocation) + } + systemProperty "tests.repo.location", repoLocation + systemProperty "tests.es.version", version.toString() + /* Use a closure on the string to delay evaluation until right before we + * run the integration tests so that we can be sure that the file is + * ready. */ + nonInputProperties.systemProperty "tests.es.port", "${-> fixture.get().addressAndPort}" + nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusterProvider.get().allHttpSocketURI.join(",")}") + nonInputProperties.systemProperty('tests.clustername', "${-> testClusterProvider.get().getName()}") + } + + tasks.named("check").configure { + dependsOn "javaRestTest#${versionNoDots}" + } + } +} + diff --git a/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java new file mode 100644 index 0000000000000..140ef92f9507f --- /dev/null +++ b/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.oldrepos; + +import org.apache.http.HttpHost; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.client.Node; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; + +public class OldRepositoryAccessIT extends ESRestTestCase { + @Override + protected Map>> wipeSnapshots() { + return Collections.emptyMap(); + } + + @SuppressWarnings("removal") + public void testOldRepoAccess() throws IOException { + String repoLocation = System.getProperty("tests.repo.location"); + Version oldVersion = Version.fromString(System.getProperty("tests.es.version")); + + int oldEsPort = Integer.parseInt(System.getProperty("tests.es.port")); + try ( + RestHighLevelClient client = new RestHighLevelClient(RestClient.builder(adminClient().getNodes().toArray(new Node[0]))); + RestClient oldEs = RestClient.builder(new HttpHost("127.0.0.1", oldEsPort)).build() + ) { + try { + Request createIndex = new Request("PUT", "/test"); + int numberOfShards = randomIntBetween(1, 3); + createIndex.setJsonEntity("{\"settings\":{\"number_of_shards\": " + numberOfShards + "}}"); + oldEs.performRequest(createIndex); + + for (int i = 0; i < 5; i++) { + Request doc = new Request("PUT", "/test/doc/testdoc" + i); + doc.addParameter("refresh", "true"); + doc.setJsonEntity("{\"test\":\"test" + i + "\", \"val\":" + i + "}"); + oldEs.performRequest(doc); + } + + // register repo on old ES and take snapshot + Request createRepoRequest = new Request("PUT", "/_snapshot/testrepo"); + createRepoRequest.setJsonEntity("{\"type\":\"fs\",\"settings\":{\"location\":\"" + repoLocation + "\"}}"); + oldEs.performRequest(createRepoRequest); + + Request createSnapshotRequest = new Request("PUT", "/_snapshot/testrepo/snap1"); + createSnapshotRequest.addParameter("wait_for_completion", "true"); + createSnapshotRequest.setJsonEntity("{\"indices\":\"test\"}"); + oldEs.performRequest(createSnapshotRequest); + + // register repo on new ES + ElasticsearchAssertions.assertAcked( + client.snapshot() + .createRepository( + new PutRepositoryRequest("testrepo").type("fs") + 
.settings(Settings.builder().put("location", repoLocation).build()), + RequestOptions.DEFAULT + ) + ); + + // list snapshots on new ES + List snapshotInfos = client.snapshot() + .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "_all" }), RequestOptions.DEFAULT) + .getSnapshots(); + assertThat(snapshotInfos, hasSize(1)); + SnapshotInfo snapshotInfo = snapshotInfos.get(0); + assertEquals("snap1", snapshotInfo.snapshotId().getName()); + assertEquals("testrepo", snapshotInfo.repository()); + assertEquals(Arrays.asList("test"), snapshotInfo.indices()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertEquals(numberOfShards, snapshotInfo.successfulShards()); + assertEquals(numberOfShards, snapshotInfo.totalShards()); + assertEquals(0, snapshotInfo.failedShards()); + assertEquals(oldVersion, snapshotInfo.version()); + + // list specific snapshot on new ES + snapshotInfos = client.snapshot() + .get(new GetSnapshotsRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT) + .getSnapshots(); + assertThat(snapshotInfos, hasSize(1)); + snapshotInfo = snapshotInfos.get(0); + assertEquals("snap1", snapshotInfo.snapshotId().getName()); + assertEquals("testrepo", snapshotInfo.repository()); + assertEquals(Arrays.asList("test"), snapshotInfo.indices()); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); + assertEquals(numberOfShards, snapshotInfo.successfulShards()); + assertEquals(numberOfShards, snapshotInfo.totalShards()); + assertEquals(0, snapshotInfo.failedShards()); + assertEquals(oldVersion, snapshotInfo.version()); + + // list advanced snapshot info on new ES + SnapshotsStatusResponse snapshotsStatusResponse = client.snapshot() + .status(new SnapshotsStatusRequest("testrepo").snapshots(new String[] { "snap1" }), RequestOptions.DEFAULT); + assertThat(snapshotsStatusResponse.getSnapshots(), hasSize(1)); + SnapshotStatus snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); + assertEquals("snap1", snapshotStatus.getSnapshot().getSnapshotId().getName()); + assertEquals("testrepo", snapshotStatus.getSnapshot().getRepository()); + assertEquals(Sets.newHashSet("test"), snapshotStatus.getIndices().keySet()); + assertEquals(SnapshotsInProgress.State.SUCCESS, snapshotStatus.getState()); + assertEquals(numberOfShards, snapshotStatus.getShardsStats().getDoneShards()); + assertEquals(numberOfShards, snapshotStatus.getShardsStats().getTotalShards()); + assertEquals(0, snapshotStatus.getShardsStats().getFailedShards()); + assertThat(snapshotStatus.getStats().getTotalSize(), greaterThan(0L)); + assertThat(snapshotStatus.getStats().getTotalFileCount(), greaterThan(0)); + } finally { + oldEs.performRequest(new Request("DELETE", "/test")); + } + } + } + +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 8bba5325cec8d..0591e0521ff78 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -91,10 +91,10 @@ public void testGetFeatureUpgradeStatus() throws Exception { assertThat(feature.size(), equalTo(4)); assertThat(feature.get("minimum_index_version"), equalTo(UPGRADE_FROM_VERSION.toString())); - if (UPGRADE_FROM_VERSION.before(Version.CURRENT.minimumIndexCompatibilityVersion())) { - assertThat(feature.get("upgrade_status"), equalTo("UPGRADE_NEEDED")); + if 
(UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { + assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { - assertThat(feature.get("upgrade_status"), equalTo("NO_UPGRADE_NEEDED")); + assertThat(feature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); } }); } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index ffad3a5f1ff40..1973ddc218bf4 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -200,7 +200,7 @@ public void testDateNanosFormatUpgrade() throws IOException { Request index = new Request("POST", "/" + indexName + "/_doc/"); XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) .startObject() - .field("date", "2015-01-01T12:10:30.123456789Z") + .field("date", "2015-01-01T12:10:30.123Z") .field("date_nanos", "2015-01-01T12:10:30.123456789Z") .endObject(); index.addParameter("refresh", "true"); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index 56f52fc403265..545cc57da4e78 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -157,6 +157,7 @@ private void doTestPagination(String repoName, assertNull(batch3LargeLimit.next()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79779") public void testSortAndPaginateWithInProgress() throws Exception { final String repoName = "test-repo"; AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "mock"); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml index 0f514f2213492..a096814689a64 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml @@ -9,7 +9,7 @@ index: timetest body: mappings: - "properties": { "my_time": {"type": "date", "format": "strict_date_optional_time_nanos"}} + "properties": { "my_time": {"type": "date_nanos", "format": "strict_date_optional_time_nanos"}} - do: ingest.put_pipeline: diff --git a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java index af9839a772a10..32e5abfb9a3e0 100644 --- a/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java +++ b/qa/system-indices/src/javaRestTest/java/org/elasticsearch/system/indices/FeatureUpgradeApiIT.java @@ -52,8 +52,8 @@ public void testGetFeatureUpgradedStatuses() throws Exception { Response response = client().performRequest(new Request("GET", "/_migration/system_features")); assertThat(response.getStatusLine().getStatusCode(), is(200)); XContentTestUtils.JsonMapView view = 
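
For orientation on the rename asserted here: the `_migration/system_features` response now exposes `migration_status` with `MIGRATION_NEEDED`/`NO_MIGRATION_NEEDED` values in place of the former `upgrade_status`/`*_UPGRADE_NEEDED` names. A minimal sketch of reading the new field, assuming the usual `ESRestTestCase` helpers:

    Request statusRequest = new Request("GET", "/_migration/system_features");
    Map<String, Object> response = entityAsMap(client().performRequest(statusRequest));
    assertEquals("NO_MIGRATION_NEEDED", response.get("migration_status")); // formerly "upgrade_status"
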
XContentTestUtils.createJsonMapView(response.getEntity().getContent()); - String upgradeStatus = view.get("upgrade_status"); - assertThat(upgradeStatus, equalTo("NO_UPGRADE_NEEDED")); + String upgradeStatus = view.get("migration_status"); + assertThat(upgradeStatus, equalTo("NO_MIGRATION_NEEDED")); List> features = view.get("features"); Map testFeature = features.stream() .filter(feature -> "system indices qa".equals(feature.get("feature_name"))) @@ -62,7 +62,7 @@ public void testGetFeatureUpgradedStatuses() throws Exception { assertThat(testFeature.size(), equalTo(4)); assertThat(testFeature.get("minimum_index_version"), equalTo(Version.CURRENT.toString())); - assertThat(testFeature.get("upgrade_status"), equalTo("NO_UPGRADE_NEEDED")); + assertThat(testFeature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); assertThat(testFeature.get("indices"), instanceOf(List.class)); assertThat((List) testFeature.get("indices"), hasSize(1)); diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java index 6e271b2af7532..ae1f90f32bef5 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientEmployeeResource.java @@ -36,6 +36,7 @@ import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @Path("/employees") +@SuppressWarnings("removal") public class RestHighLevelClientEmployeeResource { @Inject diff --git a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java index 958985c889a4c..dd8cdb193343e 100644 --- a/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/elasticsearch/wildfly/transport/RestHighLevelClientProducer.java @@ -22,6 +22,7 @@ public final class RestHighLevelClientProducer { @Produces + @SuppressWarnings("removal") public RestHighLevelClient createRestHighLevelClient() { String httpUri = System.getProperty("elasticsearch.uri"); diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 0cef59a464047..0f26777f04ec7 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -81,6 +81,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTest("search.aggregation/20_terms/string profiler via global ordinals native implementation", "The profiler results aren't backwards compatible.") task.skipTest("search.aggregation/20_terms/string profiler via map", "The profiler results aren't backwards compatible.") task.skipTest("search.aggregation/20_terms/numeric profiler", "The profiler results aren't backwards compatible.") + task.skipTest("migration/10_get_feature_upgrade_status/Get feature upgrade status", "Awaits backport") task.replaceValueInMatch("_type", "_doc") task.addAllowedWarningRegex("\\[types removal\\].*") diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json index 91712bbbded29..7d33fdd52ab81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.health.json @@ -102,6 +102,10 @@ "red" ], "description":"Wait 
until cluster is in a specific state" + }, + "return_200_for_cluster_health_timeout":{ + "type":"boolean", + "description":"Whether to return HTTP 200 instead of 408 in case of a cluster health timeout from the server side" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json deleted file mode 100644 index f7ef92dfd4266..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.freeze.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "indices.freeze":{ - "documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/freeze-index-api.html", - "description":"Freezes an index. A frozen index has almost no overhead on the cluster (except for maintaining its metadata in memory) and is read-only." - }, - "stability":"stable", - "visibility":"public", - "headers":{ - "accept": [ "application/json"] - }, - "url":{ - "paths":[ - { - "path":"/{index}/_freeze", - "methods":[ - "POST" - ], - "parts":{ - "index":{ - "type":"string", - "description":"The name of the index to freeze" - } - }, - "deprecated":{ - "version":"7.14.0", - "description":"Frozen indices are deprecated because they provide no benefit given improvements in heap memory utilization. They will be removed in a future release." - } - } - ] - }, - "params":{ - "timeout":{ - "type":"time", - "description":"Explicit operation timeout" - }, - "master_timeout":{ - "type":"time", - "description":"Specify timeout for connection to master" - }, - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "hidden", - "none", - "all" - ], - "default":"closed", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "wait_for_active_shards":{ - "type":"string", - "description":"Sets the number of active shards to wait for before the operation returns." 
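
Regarding the `return_200_for_cluster_health_timeout` parameter added to the cluster.health spec above: when set, a server-side health timeout yields HTTP 200 with `"timed_out": true` in the body instead of a 408. A sketch, with deliberately unsatisfiable thresholds (values illustrative):

    Request health = new Request("GET", "/_cluster/health");
    health.addParameter("timeout", "1ms");
    health.addParameter("wait_for_active_shards", "5"); // unlikely to be satisfied in 1ms
    health.addParameter("return_200_for_cluster_health_timeout", "true");
    Response response = client().performRequest(health); // 200 rather than 408 on timeout
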
- } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.modify_data_stream.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.modify_data_stream.json new file mode 100644 index 0000000000000..ea095289b72bc --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.modify_data_stream.json @@ -0,0 +1,28 @@ +{ + "indices.modify_data_stream":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-streams.html", + "description":"Modifies a data stream" + }, + "stability":"stable", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_data_stream/_modify", + "methods":["POST"] + } + ] + }, + "params":{ + }, + "body":{ + "description":"The data stream modifications", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/knn_search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/knn_search.json new file mode 100644 index 0000000000000..b55f35ccab4fe --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/knn_search.json @@ -0,0 +1,40 @@ +{ + "knn_search":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/search-search.html", + "description":"Performs a kNN search." + }, + "stability":"experimental", + "visibility":"public", + "headers":{ + "accept": [ "application/json"], + "content_type": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/{index}/_knn_search", + "methods":[ + "GET", + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params": { + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + }, + "body":{ + "description":"The search definition" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.delete_trained_model.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.delete_trained_model.json index e2267cf916271..0faae8241b3b2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.delete_trained_model.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.delete_trained_model.json @@ -24,6 +24,14 @@ } } ] + }, + "params":{ + "timeout":{ + "type":"time", + "required":false, + "description":"Controls the amount of time to wait for the model to be deleted.", + "default": "30s" + } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json index 1237929976696..8c8794e05c282 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.hot_threads.json @@ -53,10 +53,19 @@ "options":[ "cpu", "wait", - "block" + "block", + "mem" ], "description":"The type to sample (default: cpu)" }, + "sort":{ + "type":"enum", + "options":[ + "cpu", + "total" + ], + "description":"The sort order for 'cpu' type (default: total)" + }, "timeout":{ "type":"time", "description":"Explicit operation timeout" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/20_request_timeout.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/20_request_timeout.yml index 
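
The experimental `_knn_search` endpoint registered above accepts GET or POST with a search-definition body, but the spec deliberately leaves that body open ("The search definition"). The `knn` object below is therefore a hedged illustration rather than something taken from this diff; the index and field names are made up:

    Request knnSearch = new Request("POST", "/my-index/_knn_search");
    knnSearch.setJsonEntity(
        "{\"knn\":{\"field\":\"vector\",\"query_vector\":[0.1,0.2,0.3],\"k\":10,\"num_candidates\":100}}");
    Response response = client().performRequest(knnSearch);
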
66a7cb2b48dbd..74261d799ba7d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/20_request_timeout.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.health/20_request_timeout.yml @@ -35,3 +35,25 @@ - match: { initializing_shards: 0 } - match: { unassigned_shards: 0 } - gte: { number_of_pending_tasks: 0 } + +--- +"cluster health request timeout with 200 response code": + - skip: + version: " - 7.15.99" + reason: "return_200_for_cluster_health_timeout was added in 7.16" + - do: + cluster.health: + timeout: 1ms + wait_for_active_shards: 5 + return_200_for_cluster_health_timeout: true + + - is_true: cluster_name + - is_true: timed_out + - gte: { number_of_nodes: 1 } + - gte: { number_of_data_nodes: 1 } + - match: { active_primary_shards: 0 } + - match: { active_shards: 0 } + - match: { relocating_shards: 0 } + - match: { initializing_shards: 0 } + - match: { unassigned_shards: 0 } + - gte: { number_of_pending_tasks: 0 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml index 05937b73324bd..cd971882316d9 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml @@ -1,7 +1,7 @@ --- "Test put and reset transient settings": - skip: - version: " - 7.99.99" + version: " - 7.15.99" reason: "transient settings deprecation" features: "warnings" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml index 1fd5bc9c85a24..fefa4bb48230b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/migration/10_get_feature_upgrade_status.yml @@ -7,5 +7,5 @@ - do: migration.get_feature_upgrade_status: {} - - is_true: upgrade_status + - is_true: migration_status - is_true: features diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.hot_threads/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.hot_threads/10_basic.yml new file mode 100644 index 0000000000000..69bbfc0042de2 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.hot_threads/10_basic.yml @@ -0,0 +1,51 @@ +--- +"Nodes hot threads - CPU": + - do: + nodes.hot_threads: + type: "cpu" + - match: + $body: | + /Hot\ threads\ at/ +--- +"Nodes hot threads - CPU sort": + - do: + nodes.hot_threads: + type: "cpu" + sort: "cpu" + - match: + $body: | + /Hot\ threads\ at/ +--- +"Nodes hot threads - WAIT": + - do: + nodes.hot_threads: + type: "wait" + - match: + $body: | + /Hot\ threads\ at/ +--- +"Nodes hot threads - BLOCK": + - do: + nodes.hot_threads: + type: "block" + - match: + $body: | + /Hot\ threads\ at/ +--- +"Nodes hot threads - MEM": + - do: + nodes.hot_threads: + type: "mem" + - match: + $body: | + /Hot\ threads\ at/ +--- +"Nodes hot threads - BAD": + - do: + catch: bad_request + nodes.hot_threads: + type: "gpu" + - match: { status: 400 } + - match: { error.type: illegal_argument_exception } + - match: { error.reason: "type not supported [gpu]" } + diff --git 
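The hot-threads changes above add a `mem` sampling type and a `sort` parameter that applies to `type=cpu`, and the new YAML tests exercise each combination, including the 400 response for an unknown type. A small sketch of the corresponding REST calls, assuming a local cluster and the low-level Java client:

```java
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class HotThreadsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Sample CPU hot threads, sorted by "cpu" instead of the default "total".
            Request cpu = new Request("GET", "/_nodes/hot_threads");
            cpu.addParameter("type", "cpu");
            cpu.addParameter("sort", "cpu");
            System.out.println(EntityUtils.toString(client.performRequest(cpu).getEntity()));

            // The new "mem" sampling type; per the spec, "sort" only applies to type=cpu.
            Request mem = new Request("GET", "/_nodes/hot_threads");
            mem.addParameter("type", "mem");
            System.out.println(EntityUtils.toString(client.performRequest(mem).getEntity()));
        }
    }
}
```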
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml index 481c32f688be6..ddba9d20e83d3 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/49_range_timezone_bug.yml @@ -8,7 +8,7 @@ setup: mappings: properties: mydate: - type: date + type: date_nanos format: "uuuu-MM-dd'T'HH:mm:ss.SSSSSSSSSZZZZZ" - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/161_exists_query_within_nested_query.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/161_exists_query_within_nested_query.yml new file mode 100644 index 0000000000000..592147c0c1d93 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/161_exists_query_within_nested_query.yml @@ -0,0 +1,840 @@ +setup: + - skip: + features: ["headers", "allowed_warnings"] + + - do: + indices.create: + index: test + body: + mappings: + dynamic: false + properties: + nested: + type: nested + properties: + binary: + type: binary + doc_values: true + boolean: + type: boolean + date: + type: date + geo_point: + type: geo_point + ip: + type: ip + keyword: + type: keyword + byte: + type: byte + double: + type: double + float: + type: float + half_float: + type: half_float + integer: + type: integer + long: + type: long + short: + type: short + object: + type: object + properties: + inner1: + type: keyword + inner2: + type: keyword + text: + type: text + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 1 + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: true + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner1: "foo" + inner2: "bar" + text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 2 + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: false + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner1: "foo" + text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 3 + routing: "route_me" + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: true + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner2: "bar" + text: "foo bar" + + - do: + index: + index: "test" + id: 4 + body: {} + + - do: + indices.create: + index: test-no-dv + body: + mappings: + dynamic: false + properties: + nested: + type: nested + properties: + binary: + type: binary + doc_values: false + store: true + boolean: + type: boolean + doc_values: false + date: + type: date + doc_values: false + geo_point: + type: geo_point + doc_values: false + ip: + type: ip + doc_values: false + keyword: + type: keyword + doc_values: false + byte: + type: byte + doc_values: false + double: + type: double + doc_values: false + float: + type: float + doc_values: false + half_float: + type: half_float + doc_values: false + integer: + type: integer + doc_values: false + long: + 
type: long + doc_values: false + short: + type: short + doc_values: false + object: + type: object + properties: + inner1: + type: keyword + doc_values: false + inner2: + type: keyword + doc_values: false + text: + type: text + + - do: + headers: + Content-Type: application/json + index: + index: "test-no-dv" + id: 1 + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: true + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner1: "foo" + inner2: "bar" + text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test-no-dv" + id: 2 + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: false + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner1: "foo" + text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test-no-dv" + id: 3 + routing: "route_me" + body: + nested: + - binary: "YWJjZGUxMjM0" + boolean: true + date: "2017-01-01" + geo_point: [0.0, 20.0] + ip: "192.168.0.1" + keyword: "foo" + - byte: 1 + double: 1.0 + float: 1.0 + half_float: 1.0 + integer: 1 + - long: 1 + short: 1 + object: + inner2: "bar" + text: "foo bar" + + - do: + index: + index: "test-no-dv" + id: 4 + body: {} + + - do: + indices.refresh: + index: [test, test-no-dv] + +--- +"Test exists query within nested query on mapped binary field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.binary + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped boolean field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.boolean + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped date field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.date + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped geo_point field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.geo_point + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped ip field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.ip + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped keyword field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.keyword + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped byte field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.byte + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped double field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.double + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped float field": + 
- do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.float + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped half_float field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.half_float + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped integer field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.integer + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped long field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.long + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped short field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.short + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped object field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.object + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped object inner field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.object.inner1 + + - match: {hits.total: 2} + +--- +"Test exists query within nested query on mapped text field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + nested: + path: nested + query: + exists: + field: nested.text + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped binary field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.binary + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped boolean field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.boolean + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped date field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.date + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped geo_point field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.geo_point + + - match: {hits.total: 3} + + +--- +"Test exists query within nested query on mapped ip field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + 
index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.ip + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped keyword field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.keyword + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped byte field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.byte + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped double field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.double + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped float field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.float + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped half_float field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.half_float + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped integer field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.integer + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped long field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.long + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped short field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.short + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped object field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.object + + - match: {hits.total: 3} + +--- +"Test exists query within nested query on mapped object inner field with no doc values": + - skip: + version: " - 7.99.99" + reason: "Fixed in 7.16 (backport pending)" + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + 
query: + nested: + path: nested + query: + exists: + field: nested.object.inner1 + + - match: {hits.total: 2} + +--- +"Test exists query within nested query on mapped text field with no doc values": + - do: + search: + rest_total_hits_as_int: true + index: test-no-dv + body: + query: + nested: + path: nested + query: + exists: + field: nested.text + + - match: {hits.total: 3} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml new file mode 100644 index 0000000000000..9d61d4c359b6d --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -0,0 +1,179 @@ + +--- +date: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod"}' + + - do: + search: + index: test + body: + docvalue_fields: [ '@timestamp' ] + - match: {hits.total.value: 1} + - match: { "hits.hits.0.fields.@timestamp": ["2021-04-28T18:50:04.467Z"] } + +--- +date_nanos: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date_nanos } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod"}' + + - do: + search: + index: test + body: + docvalue_fields: [ '@timestamp' ] + - match: {hits.total.value: 1} + - match: { "hits.hits.0.fields.@timestamp": ["2021-04-28T18:50:04.467Z"] } + +--- +automatically add with date: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + metricset: + type: keyword + time_series_dimension: true + + - do: + indices.get_mapping: + index: test + - match: { 'test.mappings.properties.@timestamp': { "type": date } } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod"}' + + - do: + search: + index: test + body: + docvalue_fields: [ '@timestamp' ] + - match: {hits.total.value: 1} + - match: { "hits.hits.0.fields.@timestamp": ["2021-04-28T18:50:04.467Z"] } + +--- +reject @timestamp with wrong type: + - skip:
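The exists-within-nested suite above repeats one query shape across every field type, with and without doc values. A compact sketch of that shape against the `test` index from the setup, via the low-level Java client (client setup assumed as in the earlier sketches):

```java
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class NestedExistsExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request search = new Request("POST", "/test/_search");
            search.addParameter("rest_total_hits_as_int", "true");
            // An exists query wrapped in a nested query, mirroring the YAML tests above.
            search.setJsonEntity(
                "{\"query\": {\"nested\": {\"path\": \"nested\", "
                    + "\"query\": {\"exists\": {\"field\": \"nested.keyword\"}}}}}");
            System.out.println(EntityUtils.toString(client.performRequest(search).getEntity()));
        }
    }
}
```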
+ version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /data stream timestamp field \[@timestamp\] is of type \[keyword\], but \[date,date_nanos\] is expected/ + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: keyword + +--- +reject timestamp meta field with wrong type: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 to be backported to 7.16.0 + + - do: + catch: /.* time series index \[_data_stream_timestamp\] meta field must be enabled/ + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + _data_stream_timestamp: + enabled: false diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index 597c6488e6827..c7b8b97b32ff4 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -1,4 +1,4 @@ -add time series mappings: +ecs style: - skip: version: " - 7.99.99" reason: introduced in 8.0.0 @@ -49,3 +49,204 @@ add time series mappings: latency: type: double time_series_metric: gauge + +--- +top level dim object: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 + + - do: + indices.create: + index: tsdb_index + body: + settings: + index: + mode: time_series + routing_path: [dim.*] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + dim: + properties: + metricset: + type: keyword + time_series_dimension: true + uid: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + availability_zone: + type: short + time_series_dimension: true + name: + type: keyword + ip: + type: ip + time_series_dimension: true + network: + properties: + tx: + type: long + time_series_metric: counter + rx: + type: integer + time_series_metric: gauge + packets_dropped: + type: long + time_series_metric: gauge + latency: + type: double + time_series_metric: gauge + +--- +non keyword matches routing_path: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 + + - do: + catch: '/All fields that match routing_path must be keywords with \[time_series_dimension: true\] and without the \[script\] parameter. 
\[@timestamp\] was \[date\]./' + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset, k8s.pod.uid, "@timestamp"] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + +--- +runtime field matching routing path: + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [dim.*] + mappings: + properties: + "@timestamp": + type: date + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + + - do: + catch: /runtime fields may not match \[routing_path\] but \[dim.bar\] matched/ + search: + index: test + body: + runtime_mappings: + dim.bar: + type: keyword + query: + match: + dim.foo: a + +--- +"dynamic: runtime matches routing_path": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [dim.*] + mappings: + properties: + "@timestamp": + type: date + dim: + type: object + dynamic: runtime + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + - match: {items.0.index.error.reason: "All fields that match routing_path must be keywords with [time_series_dimension: true] and without the [script] parameter. [dim.foo] was a runtime [keyword]."} + +--- +"dynamic: false matches routing_path": + - skip: + version: " - 7.99.99" + reason: introduced in 8.0.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [dim.*] + mappings: + properties: + "@timestamp": + type: date + dim: + type: object + dynamic: false + + - do: + bulk: + refresh: true + index: test + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": {"bar": "a"}}}' + - match: {items.0.index.error.reason: "All fields matching [routing_path] must be mapped but [dim.foo] was declared as [dynamic: false]"} + - match: {items.1.index.error.reason: "All fields matching [routing_path] must be mapped but [dim.foo] was declared as [dynamic: false]"} diff --git a/server/build.gradle b/server/build.gradle index af891c853ac9e..ec2942f19c88a 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -6,6 +6,8 @@ * Side Public License, v 1. 
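The tsdb tests above pin down two invariants of `time_series` indices: `@timestamp` must be mapped as `date` or `date_nanos` (and is added automatically if absent), and every field matched by `routing_path` must be a keyword with `time_series_dimension: true` and no `script`. A sketch of a minimal index that satisfies both; note that, per the build changes below, `index.mode` appears to be gated behind a feature flag in non-snapshot builds, so this is illustrative only:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class TsdbIndexExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request create = new Request("PUT", "/tsdb_index");
            // Settings and mappings mirror the YAML tests: the routing_path field is a
            // keyword dimension, and @timestamp uses one of the two accepted date types.
            create.setJsonEntity(
                "{"
                    + "\"settings\": {\"index\": {\"mode\": \"time_series\", \"routing_path\": [\"metricset\"]}},"
                    + "\"mappings\": {\"properties\": {"
                    + "\"@timestamp\": {\"type\": \"date\"},"
                    + "\"metricset\": {\"type\": \"keyword\", \"time_series_dimension\": true}"
                    + "}}}");
            client.performRequest(create);
        }
    }
}
```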
*/ +import org.elasticsearch.gradle.internal.info.BuildParams + apply plugin: 'elasticsearch.build' apply plugin: 'nebula.optional-base' apply plugin: 'elasticsearch.publish' @@ -127,6 +129,12 @@ tasks.named("processResources").configure { dependsOn generateModulesList, generatePluginsList } +if (BuildParams.isSnapshotBuild() == false) { + tasks.named("test").configure { + systemProperty 'es.index_mode_feature_flag_registered', 'true' + } +} + tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) @@ -249,9 +257,6 @@ tasks.named('splitPackagesAudit').configure { ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery', 'org.apache.lucene.queries.BlendedTermQuery', 'org.apache.lucene.queries.SpanMatchNoDocsQuery', - 'org.apache.lucene.search.grouping.CollapseTopFieldDocs', - 'org.apache.lucene.search.grouping.CollapsingDocValuesSource', - 'org.apache.lucene.search.grouping.CollapsingTopDocsCollector', 'org.apache.lucene.search.vectorhighlight.CustomFieldQuery', // These are tricky because Lucene itself splits the index package, diff --git a/server/licenses/lucene-analysis-common-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-analysis-common-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..3776178ca7cd4 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +f20ab3e51a047780cae90d452f1e252ad04c9fdc \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-analysis-common-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 1475363327951..0000000000000 --- a/server/licenses/lucene-analysis-common-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5a90d530649b981da658ddda7889f1a3db2eec84 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-backward-codecs-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..49ee9f92f4cbc --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +92c3693907074392d6c4ce52eb2cfdc66f58bdd1 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index f7ceb099c3bcd..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -077984d39a5ffcdf1d3b6a41f0bc91352ad1a9a6 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-core-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..11aa9e6d17190 --- /dev/null +++ b/server/licenses/lucene-core-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +39c998e0de591232e0ca7e5c219e204369a185fc \ No newline at end of file diff --git a/server/licenses/lucene-core-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-core-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index d4cc15a3c336f..0000000000000 --- a/server/licenses/lucene-core-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9c2860726c0ce9a32b6c90976e2338e70c9ebfd2 \ No newline at end of file diff --git 
a/server/licenses/lucene-grouping-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-grouping-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..37c51ab1a7171 --- /dev/null +++ b/server/licenses/lucene-grouping-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +6e7b0916a72d74fd2f0304ddb4e0a9549e9a050c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-grouping-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 133859fb80755..0000000000000 --- a/server/licenses/lucene-grouping-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d97143f3691c72d62ca591df230399addccc93e7 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-highlighter-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..d79b277dfd33d --- /dev/null +++ b/server/licenses/lucene-highlighter-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +69d9eab228ed733706e61b4ab42d11b4f0d400c5 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-highlighter-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index aa29f011018c0..0000000000000 --- a/server/licenses/lucene-highlighter-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22b9996fab3f8cc167c3aa0015f859efc5dd5de3 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-join-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..aa69d85859c8a --- /dev/null +++ b/server/licenses/lucene-join-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +68e29074313f808d6fe927ef0f03958f17530d7b \ No newline at end of file diff --git a/server/licenses/lucene-join-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-join-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 1d1bed7d380bf..0000000000000 --- a/server/licenses/lucene-join-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1b1b6efd48c353ea930cc159ed526821ae80551 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-memory-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..28d296f5cdf2d --- /dev/null +++ b/server/licenses/lucene-memory-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +fd74375aba476634a523942339cd86035348cebc \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-memory-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 0382d46d7faf4..0000000000000 --- a/server/licenses/lucene-memory-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab2cb78c651898e51060d0858ce96cc390cbc951 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-misc-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..d8c6010ef7694 --- /dev/null +++ b/server/licenses/lucene-misc-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +ca38208d31f839ba48f7a3a672b6fe5a5a3993a5 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-misc-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 
index 26ae55e13363d..0000000000000 --- a/server/licenses/lucene-misc-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f94f74e9f491fb2d3bed9f71ac39d9f7047731d5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-queries-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..2b86a77ce6637 --- /dev/null +++ b/server/licenses/lucene-queries-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +7b69ddea4bff7f803c8920df64abf2c69425ffe1 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-queries-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 839fdc438bfb7..0000000000000 --- a/server/licenses/lucene-queries-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7fe8b44115414a6514020174b7d75c274492100 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-queryparser-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..4c3d2594357f3 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +c4e256dbaf37d6b4e71e8bdced5a25ad8a37f3d7 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-queryparser-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index b95ccbc6c9523..0000000000000 --- a/server/licenses/lucene-queryparser-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -198eed372135943e6971ba78682208944a9386ae \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-sandbox-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..5a98316fb0fba --- /dev/null +++ b/server/licenses/lucene-sandbox-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +10d5c40ca9394fd3071408cf59288ede444f9fcc \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-sandbox-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index e33b961389be8..0000000000000 --- a/server/licenses/lucene-sandbox-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -95798905cae9f4f60b4a6f8d8c3e18e59b9e89a8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-spatial3d-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..41e4a5a3eaeb5 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.0.0-snapshot-2719cf6630e.jar.sha1 @@ -0,0 +1 @@ +797172aca57c1b704045d2937bb20b5aa3b3d711 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-spatial3d-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 37ad81feecd1f..0000000000000 --- a/server/licenses/lucene-spatial3d-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7e3c6993f306f94618a54ae1afa7cfe75ee7c4ee \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.0.0-snapshot-2719cf6630e.jar.sha1 b/server/licenses/lucene-suggest-9.0.0-snapshot-2719cf6630e.jar.sha1 new file mode 100644 index 0000000000000..956e1abf1d2a6 --- /dev/null +++ b/server/licenses/lucene-suggest-9.0.0-snapshot-2719cf6630e.jar.sha1 
@@ -0,0 +1 @@ +6dc3975b1276bdd908dd4134b4c53ca2983613c4 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 b/server/licenses/lucene-suggest-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 deleted file mode 100644 index 08e578d7f1186..0000000000000 --- a/server/licenses/lucene-suggest-9.0.0-snapshot-cfd9f9f98f7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a0f9ecd3b487f93fad3d9b6817e9865b49e01eb \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java index a77412461db6c..e62a9a9ea0183 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/DiscoveryDisruptionIT.java @@ -154,7 +154,6 @@ public void testElectMasterWithLatestVersion() throws Exception { * sure that the node is removed form the cluster, that the node start pinging and that * the cluster reforms when healed. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/77751") public void testNodeNotReachableFromMaster() throws Exception { startCluster(3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index c32c9abfa4fc9..3d786a9ccd337 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -30,10 +30,9 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; @@ -44,6 +43,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster.RestartCallback; +import org.elasticsearch.xcontent.XContentFactory; import java.io.IOException; import java.nio.file.Path; @@ -516,11 +516,11 @@ public void testHalfDeletedIndexImport() throws Exception { final Path[] paths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); final String nodeId = client().admin().cluster().prepareNodesInfo(nodeName).clear().get().getNodes().get(0).getNode().getId(); - writeBrokenMeta(metaStateService -> { + writeBrokenMeta(nodeEnvironment -> { for (final Path path : paths) { IOUtils.rm(path.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME)); } - metaStateService.writeGlobalState("test", Metadata.builder(metadata) + MetaStateWriterUtils.writeGlobalState(nodeEnvironment, "test", Metadata.builder(metadata) // we remove the manifest file, resetting the term and making this look like an upgrade from 6.x, so must also reset the // term in the coordination metadata .coordinationMetadata(CoordinationMetadata.builder(metadata.coordinationMetadata()).term(0L).build()) @@ -534,14 +534,14 @@ public void testHalfDeletedIndexImport() throws 
Exception { assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); } - private void writeBrokenMeta(CheckedConsumer writer) throws Exception { - Map metaStateServices = Stream.of(internalCluster().getNodeNames()) - .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(MetaStateService.class, nodeName))); + private void writeBrokenMeta(CheckedConsumer writer) throws Exception { + Map nodeEnvironments = Stream.of(internalCluster().getNodeNames()) + .collect(Collectors.toMap(Function.identity(), nodeName -> internalCluster().getInstance(NodeEnvironment.class, nodeName))); internalCluster().fullRestart(new RestartCallback(){ @Override public Settings onNodeStopped(String nodeName) throws Exception { - final MetaStateService metaStateService = metaStateServices.get(nodeName); - writer.accept(metaStateService); + final NodeEnvironment nodeEnvironment = nodeEnvironments.get(nodeName); + writer.accept(nodeEnvironment); return super.onNodeStopped(nodeName); } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java index 561955271e57d..f6bca1bfc2946 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/SnapshotBasedIndexRecoveryIT.java @@ -31,11 +31,11 @@ import org.elasticsearch.common.blobstore.support.FilterBlobContainer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.MergePolicyConfig; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.shard.IndexShard; @@ -56,11 +56,15 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -69,12 +73,15 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS; +import static 
org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -766,6 +773,7 @@ public void testRecoveryConcurrentlyWithIndexing() throws Exception { assertDocumentsAreEqual(indexName, numDocs.get()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79455") public void testSeqNoBasedRecoveryIsUsedAfterPrimaryFailOver() throws Exception { List dataNodes = internalCluster().startDataOnlyNodes(3); String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); @@ -827,6 +835,331 @@ public void testSeqNoBasedRecoveryIsUsedAfterPrimaryFailOver() throws Exception } } + public void testRecoveryUsingSnapshotsIsThrottledPerNode() throws Exception { + executeRecoveryWithSnapshotFileDownloadThrottled((indices, + sourceNode, + targetNode, + targetMockTransportService, + recoverySnapshotFileRequests, + awaitForRecoverSnapshotFileRequestReceived, + respondToRecoverSnapshotFile) -> { + String indexRecoveredFromSnapshot1 = indices.get(0); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot1) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + awaitForRecoverSnapshotFileRequestReceived.run(); + + // Ensure that peer recoveries can make progress without restoring snapshot files + // while the permit is granted to a different recovery + String indexRecoveredFromPeer = indices.get(1); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromPeer) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromPeer); + assertPeerRecoveryDidNotUseSnapshots(indexRecoveredFromPeer, sourceNode, targetNode); + + // let snapshot file restore to proceed + respondToRecoverSnapshotFile.run(); + + ensureGreen(indexRecoveredFromSnapshot1); + + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot1, sourceNode, targetNode); + + for (RecoverySnapshotFileRequest recoverySnapshotFileRequest : recoverySnapshotFileRequests) { + String indexName = recoverySnapshotFileRequest.getShardId().getIndexName(); + assertThat(indexName, is(equalTo(indexRecoveredFromSnapshot1))); + } + + targetMockTransportService.clearAllRules(); + + String indexRecoveredFromSnapshot2 = indices.get(2); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot2) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromSnapshot2); + + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot2, sourceNode, targetNode); + + }); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79420") + public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation() throws Exception { + executeRecoveryWithSnapshotFileDownloadThrottled((indices, + sourceNode, + targetNode, + targetMockTransportService, + recoverySnapshotFileRequests, + awaitForRecoverSnapshotFileRequestReceived, + respondToRecoverSnapshotFile) -> { + String indexRecoveredFromSnapshot1 = indices.get(0); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot1) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", 
targetNode)).get() + ); + + awaitForRecoverSnapshotFileRequestReceived.run(); + + targetMockTransportService.clearAllRules(); + + boolean cancelRecovery = randomBoolean(); + if (cancelRecovery) { + assertAcked(client().admin().indices().prepareDelete(indexRecoveredFromSnapshot1).get()); + + respondToRecoverSnapshotFile.run(); + + assertThat(indexExists(indexRecoveredFromSnapshot1), is(equalTo(false))); + } else { + // Recovery would fail and should release the granted permit and allow other + // recoveries to use snapshots + CountDownLatch cleanFilesRequestReceived = new CountDownLatch(1); + AtomicReference channelRef = new AtomicReference<>(); + targetMockTransportService.addRequestHandlingBehavior(PeerRecoveryTargetService.Actions.CLEAN_FILES, + (handler, request, channel, task) -> { + channelRef.compareAndExchange(null, channel); + cleanFilesRequestReceived.countDown(); + } + ); + + respondToRecoverSnapshotFile.run(); + cleanFilesRequestReceived.await(); + + targetMockTransportService.clearAllRules(); + channelRef.get().sendResponse(new IOException("unable to clean files")); + } + + String indexRecoveredFromSnapshot2 = indices.get(1); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot2) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromSnapshot2); + + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot2, sourceNode, targetNode); + }); + } + + public void testRecoveryReEstablishKeepsTheGrantedSnapshotFileDownloadPermit() throws Exception { + executeRecoveryWithSnapshotFileDownloadThrottled((indices, + sourceNode, + targetNode, + targetMockTransportService, + recoverySnapshotFileRequests, + awaitForRecoverSnapshotFileRequestReceived, + respondToRecoverSnapshotFile) -> { + AtomicReference startRecoveryConnection = new AtomicReference<>(); + CountDownLatch reestablishRecoverySent = new CountDownLatch(1); + targetMockTransportService.addSendBehavior((connection, requestId, action, request, options) -> { + if (action.equals(PeerRecoverySourceService.Actions.START_RECOVERY)) { + startRecoveryConnection.compareAndExchange(null, connection); + } else if (action.equals(PeerRecoverySourceService.Actions.REESTABLISH_RECOVERY)) { + reestablishRecoverySent.countDown(); + } + connection.sendRequest(requestId, action, request, options); + }); + + String indexRecoveredFromSnapshot1 = indices.get(0); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot1) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + awaitForRecoverSnapshotFileRequestReceived.run(); + + startRecoveryConnection.get().close(); + + reestablishRecoverySent.await(); + + String indexRecoveredFromPeer = indices.get(1); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromPeer) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromPeer); + assertPeerRecoveryDidNotUseSnapshots(indexRecoveredFromPeer, sourceNode, targetNode); + + respondToRecoverSnapshotFile.run(); + + ensureGreen(indexRecoveredFromSnapshot1); + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot1, sourceNode, targetNode); + + targetMockTransportService.clearAllRules(); + + final String indexRecoveredFromSnapshot2 = indices.get(2); + assertAcked( + 
client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot2) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromSnapshot2); + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot2, sourceNode, targetNode); + }); + } + + public void testRecoveryRetryKeepsTheGrantedSnapshotFileDownloadPermit() throws Exception { + executeRecoveryWithSnapshotFileDownloadThrottled((indices, + sourceNode, + targetNode, + targetMockTransportService, + recoverySnapshotFileRequests, + awaitForRecoverSnapshotFileRequestReceived, + respondToRecoverSnapshotFile) -> { + MockTransportService sourceMockTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, sourceNode); + + CountDownLatch startRecoveryRetryReceived = new CountDownLatch(1); + AtomicBoolean delayRecoveryExceptionSent = new AtomicBoolean(); + sourceMockTransportService.addRequestHandlingBehavior(PeerRecoverySourceService.Actions.START_RECOVERY, + (handler, request, channel, task) -> { + if (delayRecoveryExceptionSent.compareAndSet(false, true)) { + channel.sendResponse(new DelayRecoveryException("delay")); + } else { + startRecoveryRetryReceived.countDown(); + handler.messageReceived(request, channel, task); + } + }); + + String indexRecoveredFromSnapshot1 = indices.get(0); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot1) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + startRecoveryRetryReceived.await(); + sourceMockTransportService.clearAllRules(); + awaitForRecoverSnapshotFileRequestReceived.run(); + + String indexRecoveredFromPeer = indices.get(1); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromPeer) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromPeer); + assertPeerRecoveryDidNotUseSnapshots(indexRecoveredFromPeer, sourceNode, targetNode); + + respondToRecoverSnapshotFile.run(); + + ensureGreen(indexRecoveredFromSnapshot1); + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot1, sourceNode, targetNode); + + targetMockTransportService.clearAllRules(); + + final String indexRecoveredFromSnapshot2 = indices.get(2); + assertAcked( + client().admin().indices().prepareUpdateSettings(indexRecoveredFromSnapshot2) + .setSettings(Settings.builder() + .put("index.routing.allocation.require._name", targetNode)).get() + ); + + ensureGreen(indexRecoveredFromSnapshot2); + assertPeerRecoveryUsedSnapshots(indexRecoveredFromSnapshot2, sourceNode, targetNode); + }); + } + + + private void executeRecoveryWithSnapshotFileDownloadThrottled(SnapshotBasedRecoveryThrottlingTestCase testCase) throws Exception { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), "1"); + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), "1"); + + try { + List dataNodes = internalCluster().startDataOnlyNodes(2); + List indices = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + createIndex(indexName, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") + .put("index.routing.allocation.require._name", dataNodes.get(0)) 
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + indices.add(indexName); + } + + String repoName = "repo"; + createRepo(repoName, "fs"); + + for (String indexName : indices) { + int numDocs = randomIntBetween(300, 1000); + indexDocs(indexName, numDocs, numDocs); + + createSnapshot(repoName, "snap-" + indexName, Collections.singletonList(indexName)); + } + + String sourceNode = dataNodes.get(0); + String targetNode = dataNodes.get(1); + MockTransportService targetMockTransportService = + (MockTransportService) internalCluster().getInstance(TransportService.class, targetNode); + + List recoverySnapshotFileRequests = Collections.synchronizedList(new ArrayList<>()); + CountDownLatch recoverSnapshotFileRequestReceived = new CountDownLatch(1); + CountDownLatch respondToRecoverSnapshotFile = new CountDownLatch(1); + targetMockTransportService.addRequestHandlingBehavior(PeerRecoveryTargetService.Actions.RESTORE_FILE_FROM_SNAPSHOT, + (handler, request, channel, task) -> { + recoverySnapshotFileRequests.add((RecoverySnapshotFileRequest) request); + recoverSnapshotFileRequestReceived.countDown(); + respondToRecoverSnapshotFile.await(); + handler.messageReceived(request, channel, task); + } + ); + + testCase.execute(indices, + sourceNode, + targetNode, + targetMockTransportService, + recoverySnapshotFileRequests, + recoverSnapshotFileRequestReceived::await, + respondToRecoverSnapshotFile::countDown + ); + } finally { + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE.getKey(), null); + updateSetting(INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS.getKey(), null); + } + } + + interface SnapshotBasedRecoveryThrottlingTestCase { + void execute(List indices, + String sourceNode, + String targetNode, + MockTransportService targetMockTransportService, + List recoverySnapshotFileRequests, + CheckedRunnable awaitForRecoverSnapshotFileRequestReceived, + Runnable respondToRecoverSnapshotFile) throws Exception; + } + + private void assertPeerRecoveryUsedSnapshots(String indexName, String sourceNode, String targetNode) { + RecoveryState recoveryStateIndexRecoveredFromPeer = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryStateIndexRecoveredFromPeer, sourceNode, targetNode); + assertThat(recoveryStateIndexRecoveredFromPeer.getIndex().recoveredFromSnapshotBytes(), is(greaterThan(0L))); + } + + private void assertPeerRecoveryDidNotUseSnapshots(String indexName, String sourceNode, String targetNode) { + RecoveryState recoveryStateIndexRecoveredFromPeer = getLatestPeerRecoveryStateForShard(indexName, 0); + assertPeerRecoveryWasSuccessful(recoveryStateIndexRecoveredFromPeer, sourceNode, targetNode); + assertThat(recoveryStateIndexRecoveredFromPeer.getIndex().recoveredFromSnapshotBytes(), is(equalTo(0L))); + } + private Store.MetadataSnapshot getMetadataSnapshot(String nodeName, String indexName) throws IOException { ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeName); @@ -926,7 +1259,7 @@ private void assertSearchResponseContainsAllIndexedDocs(SearchResponse searchRes } } - private void assertPeerRecoveryWasSuccessful(RecoveryState recoveryState, String sourceNode, String targetNode) throws Exception { + private void assertPeerRecoveryWasSuccessful(RecoveryState recoveryState, String sourceNode, String targetNode) { assertThat(recoveryState.getStage(), equalTo(RecoveryState.Stage.DONE)); 
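The throttling tests above drive snapshot-based recovery through two dynamic cluster settings. A sketch of setting them by hand; the flat keys are an assumption derived from the constant names (`INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS` and its new `_PER_NODE` counterpart), and the value of 1 mirrors what the tests use:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class RecoveryThrottleExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request settings = new Request("PUT", "/_cluster/settings");
            // Assumed setting keys; limit concurrent snapshot file downloads to one
            // per recovery and one per node, as the tests above do.
            settings.setJsonEntity(
                "{\"persistent\": {"
                    + "\"indices.recovery.max_concurrent_snapshot_file_downloads\": 1,"
                    + "\"indices.recovery.max_concurrent_snapshot_file_downloads_per_node\": 1"
                    + "}}");
            client.performRequest(settings);
        }
    }
}
```

Setting both keys back to `null` restores the defaults, which is exactly what the test's `finally` block does.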
assertThat(recoveryState.getRecoverySource(), equalTo(RecoverySource.PeerRecoverySource.INSTANCE)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java index b3d05e7b38f2e..8fdd61855c32c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/ComposableTemplateIT.java @@ -13,18 +13,10 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Template; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.test.ESIntegTestCase; -import java.io.IOException; import java.util.Collections; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; public class ComposableTemplateIT extends ESIntegTestCase { @@ -80,23 +72,4 @@ public void testComponentTemplatesCanBeUpdatedAfterRestart() throws Exception { client().execute(PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(cit2)).get(); } - - public void testUsageOfDataStreamFails() throws IOException { - // Exception that would happen if a unknown field is provided in a composable template: - // The thrown exception will be used to compare against the exception that is thrown when providing - // a composable template with a data stream definition. 
- String content = "{\"index_patterns\":[\"logs-*-*\"],\"my_field\":\"bla\"}"; - XContentParser parser = - XContentHelper.createParser(xContentRegistry(), null, new BytesArray(content), XContentType.JSON); - Exception expectedException = expectThrows(Exception.class, () -> ComposableIndexTemplate.parse(parser)); - - ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of("logs-*-*")) - .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate()).build(); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, - new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template)).actionGet()); - Exception actualException = (Exception) e.getCause(); - assertThat(actualException.getMessage(), - equalTo(expectedException.getMessage().replace("[1:32] ", "").replace("my_field", "data_stream"))); - assertThat(actualException.getMessage(), equalTo("[index_template] unknown field [data_stream]")); - } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreDynamicSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreDynamicSettingsIT.java index 595a59b7c38f6..d58b97485bd35 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreDynamicSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreDynamicSettingsIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryConflictException; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; @@ -92,11 +93,11 @@ largeSnapshotPool && randomBoolean() randomBoolean() ); } catch (Exception e) { - final Throwable ise = ExceptionsHelper.unwrap(e, IllegalStateException.class); - assertThat(ise, instanceOf(IllegalStateException.class)); + final Throwable ise = ExceptionsHelper.unwrap(e, RepositoryConflictException.class); + assertThat(ise, instanceOf(RepositoryConflictException.class)); assertEquals( ise.getMessage(), - "trying to modify or unregister repository [test-repo] that is currently used (snapshot is in progress)" + "[test-repo] trying to modify or unregister repository that is currently used (snapshot is in progress)" ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java index 20ea1d0b6799b..7a3a0d967070b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -10,7 +10,9 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -21,6 +23,8 @@ import java.util.Map; import java.util.Set; +import static 
org.hamcrest.CoreMatchers.containsString; + public class PartitionedRoutingIT extends ESIntegTestCase { public void testVariousPartitionSizes() throws Exception { @@ -108,6 +112,28 @@ public void testShrinking() throws Exception { } } + public void testUnableToUpdateIndexRoutingPartitionSizes() throws Exception { + Settings currentSettings = Settings.builder() + .put("index.routing_partition_size", 2) + .build(); + IndexScopedSettings indexScopedSettings = new IndexScopedSettings( + currentSettings, + Set.of(IndexMetadata.INDEX_ROUTING_PARTITION_SIZE_SETTING) + ); + Settings newSettings = Settings.builder().put("index.routing_partition_size", 3).build(); + + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> indexScopedSettings.updateDynamicSettings( + newSettings, + Settings.builder().put(currentSettings), + Settings.builder(), + "indexMetadata" + ) + ); + assertThat(exc.getMessage(), containsString("final indexMetadata setting [index.routing_partition_size]")); + } + private void verifyRoutedSearches(String index, Map> routingToDocumentIds, Set expectedShards) { for (Map.Entry> routingEntry : routingToDocumentIds.entrySet()) { String routing = routingEntry.getKey(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java index 309a75280ad46..bd54742e4518d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/geo/GeoPointScriptDocValuesIT.java @@ -1,10 +1,3 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. 
Licensed under the Elastic License diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 014643b5a2906..d677043917bce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -260,7 +260,7 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { .collect( Collectors.toMap( SnapshotId::getUUID, - s -> new RepositoryData.SnapshotDetails(repositoryData.getSnapshotState(s), null, -1, -1) + s -> new RepositoryData.SnapshotDetails(repositoryData.getSnapshotState(s), null, -1, -1, null) ) ), Collections.emptyMap(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 7f7eb35b1ef84..214d55770b044 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -1251,7 +1251,7 @@ public void testConcurrentSnapshotAndRepoDelete() throws Exception { assertThat( e.getMessage(), containsString( - "trying to modify or unregister repository [test-repo] that is currently used (snapshot deletion is in progress)" + "[test-repo] trying to modify or unregister repository that is currently used (snapshot deletion is in progress)" ) ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index 12bb6c5e226e6..d3e5724f5f3c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.threadpool.ThreadPool; @@ -177,7 +178,15 @@ public void testSortAndPaginateWithInProgress() throws Exception { inProgressSnapshots.add(startFullSnapshot(repoName, snapshotName)); } awaitNumberOfSnapshotsInProgress(inProgressCount); - + awaitClusterState( + state -> state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY) + .asStream() + .flatMap(s -> s.shards().stream()) + .allMatch( + e -> e.getKey().getIndexName().equals("test-index-1") == false + || e.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS + ) + ); final String[] repos = { repoName }; assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.START_TIME); assertStablePagination(repos, allSnapshotNames, GetSnapshotsRequest.SortBy.NAME); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index b6ccc015cf499..489845ef7fd68 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -19,8 +20,12 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.RepositoryConflictException; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.mockstore.MockRepository; import org.elasticsearch.test.ESIntegTestCase; import java.nio.file.Path; @@ -237,4 +242,65 @@ public void testRepositoryVerification() { assertThat(ExceptionsHelper.stackTrace(ex), containsString("is not shared")); } } + + public void testRepositoryConflict() throws Exception { + logger.info("--> creating repository"); + final String repo = "test-repo"; + assertAcked( + client().admin() + .cluster() + .preparePutRepository(repo) + .setType("mock") + .setSettings( + Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAlphaOfLength(10)) + .put("wait_after_unblock", 200) + ) + .get() + ); + + logger.info("--> snapshot"); + final String index = "test-idx"; + assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); + for (int i = 0; i < 10; i++) { + indexDoc(index, Integer.toString(i), "foo", "bar" + i); + } + refresh(); + final String snapshot1 = "test-snap1"; + client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + String blockedNode = internalCluster().getMasterName(); + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, blockedNode).repository(repo)).blockOnDataFiles(); + logger.info("--> start deletion of snapshot"); + ActionFuture future = client().admin().cluster().prepareDeleteSnapshot(repo, snapshot1).execute(); + logger.info("--> waiting for block to kick in on node [{}]", blockedNode); + waitForBlock(blockedNode, repo); + + logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); + RepositoryConflictException e1 = expectThrows( + RepositoryConflictException.class, + () -> client().admin().cluster().prepareDeleteRepository(repo).get() + ); + assertThat(e1.status(), equalTo(RestStatus.CONFLICT)); + assertThat(e1.getMessage(), containsString("trying to modify or unregister repository that is currently used")); + + logger.info("--> try updating the repository, should fail because the deletion of the snapshot is in progress"); + RepositoryConflictException e2 = expectThrows( + RepositoryConflictException.class, + () -> client().admin() + .cluster() + .preparePutRepository(repo) + .setType("mock") + .setSettings(Settings.builder().put("location", randomRepoPath())) + .get() + ); + assertThat(e2.status(), equalTo(RestStatus.CONFLICT)); + assertThat(e2.getMessage(), containsString("trying to 
modify or unregister repository that is currently used")); + + logger.info("--> unblocking blocked node [{}]", blockedNode); + unblockNode(repo, blockedNode); + + logger.info("--> wait until snapshot deletion is finished"); + assertAcked(future.actionGet()); + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7a343b9f4216c..5f68fecfda41e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.snapshots; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -30,9 +32,11 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNodesHelper; @@ -70,10 +74,12 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Predicate; +import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -965,7 +971,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> in-use repository deletion failed"); assertThat( ex.getMessage(), - equalTo("trying to modify or unregister repository [test-repo] that is currently used (snapshot is in progress)") + equalTo("[test-repo] trying to modify or unregister repository that is currently used (snapshot is in progress)") ); } @@ -1150,11 +1156,26 @@ public void testSnapshotStatus() throws Exception { ); // Create index on 2 nodes and make sure each node has a primary by setting no replicas - assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0))); + assertAcked( + prepareCreate( + "test-idx", + 2, + Settings.builder().put("number_of_replicas", 0).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(2, 10)) + ) + ); indexRandomDocs("test-idx", 100); // Pick one node and block it String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); + String blockedNodeId = clusterService().state() + .getNodes() + .getDataNodes() + .values() + .stream() + .filter(n -> n.getName().equals(blockedNode)) + .map(DiscoveryNode::getId) + .findFirst() + .orElse(""); logger.info("--> snapshot"); client.admin() @@ -1168,6 +1189,22 @@ public void testSnapshotStatus() throws Exception { logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); + awaitClusterState(state -> { + SnapshotsInProgress snapshotsInProgress = 
state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + Set snapshots = snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::snapshot).collect(Collectors.toSet()); + if (snapshots.size() != 1) { + return false; + } + SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshots.iterator().next()); + for (ObjectObjectCursor shard : entry.shards()) { + if (shard.value.nodeId().equals(blockedNodeId) == false + && shard.value.state() == SnapshotsInProgress.ShardState.SUCCESS == false) { + return false; + } + } + return true; + }); + logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet(); assertThat(response.getSnapshots().size(), equalTo(1)); @@ -1208,6 +1245,10 @@ public void testSnapshotStatus() throws Exception { assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); + snapshotStatus = client.admin().cluster().prepareSnapshotStatus().get().getSnapshots().get(0); + assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getTotalShards())); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getDoneShards())); + assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); logger.info("--> unblocking blocked node"); unblockNode("test-repo", blockedNode); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 860d5544dd803..3cb6aa40060c2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -36,6 +36,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -409,7 +410,8 @@ public void testGetSnapshotsMultipleRepos() throws Exception { public void testGetSnapshotsWithSnapshotInProgress() throws Exception { createRepository("test-repo", "mock", Settings.builder().put("location", randomRepoPath()).put("block_on_data", true)); - createIndexWithContent("test-idx-1"); + String indexName = "test-idx-1"; + createIndexWithContent(indexName, indexSettingsNoReplicas(randomIntBetween(2, 10)).build()); ensureGreen(); ActionFuture createSnapshotResponseActionFuture = startFullSnapshot("test-repo", "test-snap"); @@ -418,6 +420,19 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { waitForBlockOnAnyDataNode("test-repo"); awaitNumberOfSnapshotsInProgress(1); + logger.info("--> wait for snapshots to get to a consistent state"); + awaitClusterState(state -> { + SnapshotsInProgress snapshotsInProgress = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + Set snapshots = snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::snapshot).collect(Collectors.toSet()); + if (snapshots.size() != 1) { + return false; + } + var shards = snapshotsInProgress.snapshot(snapshots.iterator().next()).shards(); + long initShards = shards.stream().filter(e -> e.getValue().state() == 
SnapshotsInProgress.ShardState.INIT).count(); + long successShards = shards.stream().filter(e -> e.getValue().state() == SnapshotsInProgress.ShardState.SUCCESS).count(); + return successShards == shards.size() - 1 && initShards == 1; + }); + GetSnapshotsResponse response1 = client().admin() .cluster() .prepareGetSnapshots("test-repo") @@ -426,7 +441,13 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { .get(); List snapshotInfoList = response1.getSnapshots(); assertEquals(1, snapshotInfoList.size()); - assertEquals(SnapshotState.IN_PROGRESS, snapshotInfoList.get(0).state()); + SnapshotInfo snapshotInfo = snapshotInfoList.get(0); + assertEquals(SnapshotState.IN_PROGRESS, snapshotInfo.state()); + + SnapshotStatus snapshotStatus = client().admin().cluster().prepareSnapshotStatus().get().getSnapshots().get(0); + assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getTotalShards())); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getDoneShards())); + assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); String notExistedSnapshotName = "snapshot_not_exist"; GetSnapshotsResponse response2 = client().admin() diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java deleted file mode 100644 index ff83f1ba8ec02..0000000000000 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapsingTopDocsCollector.java +++ /dev/null @@ -1,175 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ -package org.apache.lucene.search.grouping; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.TotalHits; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.index.mapper.MappedFieldType; - -import java.io.IOException; -import java.util.Collection; -import java.util.Iterator; - -import static org.apache.lucene.search.SortField.Type.SCORE; - -/** - * A collector that groups documents based on field values and returns {@link CollapseTopFieldDocs} - * output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key. - * The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}. - * - * This collector optionally supports searching after a previous result through the 'after' parameter. - * - * TODO: If the sort is based on score we should propagate the mininum competitive score when orderedGroups is full. - * This is safe for collapsing since the group sort is the same as the query sort. 
- */ -public final class CollapsingTopDocsCollector extends FirstPassGroupingCollector { - protected final String collapseField; - protected final Sort sort; - private int totalHitCount; - - private final FieldDoc after; - private final FieldComparator comparator; - private final int reversed; - private LeafFieldComparator leafComparator; - - @SuppressWarnings("unchecked") - CollapsingTopDocsCollector(GroupSelector groupSelector, String collapseField, Sort sort, int topN, FieldDoc after) { - super(groupSelector, sort, topN); - this.collapseField = collapseField; - this.sort = sort; - this.after = after; - assert after == null || (sort.getSort().length == 1 && after.doc == Integer.MAX_VALUE); - - SortField sortField = sort.getSort()[0]; - this.comparator = sortField.getComparator(0, 0); - if (after != null) { - ((FieldComparator) comparator).setTopValue(after.fields[0]); - } - this.reversed = sortField.getReverse() ? -1 : 1; - } - - /** - * Transform {@link FirstPassGroupingCollector#getTopGroups(int)} output in - * {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can get the final top docs at the end - * of the first pass. - */ - public CollapseTopFieldDocs getTopDocs() throws IOException { - Collection> groups = super.getTopGroups(0); - if (groups == null) { - TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO); - return new CollapseTopFieldDocs(collapseField, totalHits, new ScoreDoc[0], sort.getSort(), new Object[0]); - } - FieldDoc[] docs = new FieldDoc[groups.size()]; - Object[] collapseValues = new Object[groups.size()]; - int scorePos = -1; - for (int index = 0; index < sort.getSort().length; index++) { - SortField sortField = sort.getSort()[index]; - if (sortField.getType() == SCORE) { - scorePos = index; - break; - } - } - int pos = 0; - Iterator> it = orderedGroups.iterator(); - for (SearchGroup group : groups) { - assert it.hasNext(); - CollectedSearchGroup col = it.next(); - float score = Float.NaN; - if (scorePos != -1) { - score = (float) group.sortValues[scorePos]; - } - docs[pos] = new FieldDoc(col.topDoc, score, group.sortValues); - collapseValues[pos] = group.groupValue; - pos++; - } - TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO); - return new CollapseTopFieldDocs(collapseField, totalHits, docs, sort.getSort(), collapseValues); - } - - @Override - public ScoreMode scoreMode() { - if (super.scoreMode().needsScores()) { - return ScoreMode.COMPLETE; - } else { - return ScoreMode.COMPLETE_NO_SCORES; - } - } - - @Override - protected void doSetNextReader(LeafReaderContext readerContext) throws IOException { - leafComparator = comparator.getLeafComparator(readerContext); - super.doSetNextReader(readerContext); - } - - @Override - public void collect(int doc) throws IOException { - totalHitCount++; - if (after != null) { - int cmp = reversed * leafComparator.compareTop(doc); - if (cmp >= 0) { - return; - } - } - super.collect(doc); - } - - /** - * Create a collapsing top docs collector on a {@link org.apache.lucene.index.NumericDocValues} field. - * It accepts also {@link org.apache.lucene.index.SortedNumericDocValues} field but - * the collect will fail with an {@link IllegalStateException} if a document contains more than one value for the - * field. - * - * @param collapseField The sort field used to group documents. - * @param collapseFieldType The {@link MappedFieldType} for this sort field. - * @param sort The {@link Sort} used to sort the collapsed hits. 
- * The collapsing keeps only the top sorted document per collapsed key. - * This must be non-null, ie, if you want to groupSort by relevance - * use Sort.RELEVANCE. - * @param topN How many top groups to keep. - * @param after The field values to search after. Can be null. - */ - public static CollapsingTopDocsCollector createNumeric(String collapseField, - MappedFieldType collapseFieldType, - Sort sort, - int topN, - @Nullable FieldDoc after) { - return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseFieldType), - collapseField, sort, topN, after); - } - - /** - * Create a collapsing top docs collector on a {@link org.apache.lucene.index.SortedDocValues} field. - * It accepts also {@link org.apache.lucene.index.SortedSetDocValues} field but - * the collect will fail with an {@link IllegalStateException} if a document contains more than one value for the - * field. - * - * @param collapseField The sort field used to group documents. - * @param collapseFieldType The {@link MappedFieldType} for this sort field. - * @param sort The {@link Sort} used to sort the collapsed hits. The collapsing keeps only the top sorted - * document per collapsed key. - * This must be non-null, ie, if you want to groupSort by relevance use Sort.RELEVANCE. - * @param topN How many top groups to keep. - * @param after The field values to search after. Can be null. - */ - public static CollapsingTopDocsCollector createKeyword(String collapseField, - MappedFieldType collapseFieldType, - Sort sort, - int topN, - @Nullable FieldDoc after) { - return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseFieldType), - collapseField, sort, topN, after); - } -} diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index c5d0938a7bdb2..d8ee54807cbd6 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -1043,7 +1043,12 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.ElasticsearchAuthenticationProcessingError.class, org.elasticsearch.ElasticsearchAuthenticationProcessingError::new, 162, - Version.V_7_16_0); + Version.V_7_16_0), + REPOSITORY_CONFLICT_EXCEPTION( + org.elasticsearch.repositories.RepositoryConflictException.class, + org.elasticsearch.repositories.RepositoryConflictException::new, + 163, + Version.V_8_0_0); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3c371e59d2a90..e35e1f2ee9afe 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -91,7 +91,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_1 = new Version(7150199, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_2 = new Version(7150299, org.apache.lucene.util.Version.LUCENE_8_9_0); - public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_0); + public static final Version V_7_16_0 = new Version(7160099, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_8_0_0 = new Version(8000099, 
org.apache.lucene.util.Version.LUCENE_9_0_0); public static final Version CURRENT = V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 197f1c567a416..5f5793d4afcc4 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -189,6 +189,8 @@ import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.TransportBulkAction; import org.elasticsearch.action.bulk.TransportShardBulkAction; +import org.elasticsearch.action.datastreams.ModifyDataStreamsAction; +import org.elasticsearch.action.datastreams.ModifyDataStreamsTransportAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.explain.ExplainAction; @@ -370,6 +372,7 @@ import org.elasticsearch.rest.action.cat.RestTasksAction; import org.elasticsearch.rest.action.cat.RestTemplatesAction; import org.elasticsearch.rest.action.cat.RestThreadPoolAction; +import org.elasticsearch.rest.action.datastreams.RestModifyDataStreamsAction; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestDeleteAction; import org.elasticsearch.rest.action.document.RestGetAction; @@ -599,6 +602,9 @@ public void reg actions.register(AnalyzeIndexDiskUsageAction.INSTANCE, TransportAnalyzeIndexDiskUsageAction.class); actions.register(FieldUsageStatsAction.INSTANCE, TransportFieldUsageAction.class); + //Data streams + actions.register(ModifyDataStreamsAction.INSTANCE, ModifyDataStreamsTransportAction.class); + //Indexed scripts actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class); actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class); @@ -763,6 +769,9 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestReloadSecureSettingsAction()); + // Data streams + registerHandler.accept(new RestModifyDataStreamsAction()); + // Scripts API registerHandler.accept(new RestGetStoredScriptAction()); registerHandler.accept(new RestPutStoredScriptAction()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java index 8842856aa3fa6..629ce58d757a1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.ActiveShardCount; @@ -35,6 +34,8 @@ public class ClusterHealthRequest extends MasterNodeReadRequest INDEX_PARSER = (XContentParser parser, Void context, String index) -> ClusterIndexHealth.innerFromXContent(parser, index); - private static final String ES_CLUSTER_HEALTH_REQUEST_TIMEOUT_200_KEY = "es.cluster_health.request_timeout_200"; + static final String ES_CLUSTER_HEALTH_REQUEST_TIMEOUT_200_KEY = "return_200_for_cluster_health_timeout"; static final String CLUSTER_HEALTH_REQUEST_TIMEOUT_DEPRECATION_MSG = "The HTTP status code for a cluster health timeout " + "will be 
changed from 408 to 200 in a future version. Set the [" + ES_CLUSTER_HEALTH_REQUEST_TIMEOUT_200_KEY + "] " + - "system property to [true] to suppress this message and opt in to the future behaviour now."; + "query parameter to [true] to suppress this message and opt in to the future behaviour now."; static { // ClusterStateHealth fields @@ -137,15 +137,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo private boolean timedOut = false; private ClusterStateHealth clusterStateHealth; private ClusterHealthStatus clusterHealthStatus; - private boolean esClusterHealthRequestTimeout200 = readEsClusterHealthRequestTimeout200FromProperty(); - - public ClusterHealthResponse() { - } - - /** For the testing of opting in for the 200 status code without setting a system property */ - ClusterHealthResponse(boolean esClusterHealthRequestTimeout200) { - this.esClusterHealthRequestTimeout200 = esClusterHealthRequestTimeout200; - } + private boolean return200ForClusterHealthTimeout; public ClusterHealthResponse(StreamInput in) throws IOException { super(in); @@ -157,15 +149,19 @@ public ClusterHealthResponse(StreamInput in) throws IOException { numberOfInFlightFetch = in.readInt(); delayedUnassignedShards= in.readInt(); taskMaxWaitingTime = in.readTimeValue(); + return200ForClusterHealthTimeout = in.readBoolean(); } /** needed for plugins BWC */ - public ClusterHealthResponse(String clusterName, String[] concreteIndices, ClusterState clusterState) { - this(clusterName, concreteIndices, clusterState, -1, -1, -1, TimeValue.timeValueHours(0)); + public ClusterHealthResponse(String clusterName, String[] concreteIndices, ClusterState clusterState, + boolean return200ForServerTimeout) { + this(clusterName, concreteIndices, clusterState, -1, -1, -1, TimeValue.timeValueHours(0), + return200ForServerTimeout); } public ClusterHealthResponse(String clusterName, String[] concreteIndices, ClusterState clusterState, int numberOfPendingTasks, - int numberOfInFlightFetch, int delayedUnassignedShards, TimeValue taskMaxWaitingTime) { + int numberOfInFlightFetch, int delayedUnassignedShards, TimeValue taskMaxWaitingTime, + boolean return200ForServerTimeout) { this.clusterName = clusterName; this.numberOfPendingTasks = numberOfPendingTasks; this.numberOfInFlightFetch = numberOfInFlightFetch; @@ -173,6 +169,7 @@ public ClusterHealthResponse(String clusterName, String[] concreteIndices, Clust this.taskMaxWaitingTime = taskMaxWaitingTime; this.clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices); this.clusterHealthStatus = clusterStateHealth.getStatus(); + this.return200ForClusterHealthTimeout = return200ForServerTimeout; } /** @@ -304,6 +301,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(numberOfInFlightFetch); out.writeInt(delayedUnassignedShards); out.writeTimeValue(taskMaxWaitingTime); + out.writeBoolean(return200ForClusterHealthTimeout); } @Override @@ -316,7 +314,7 @@ public RestStatus status() { if (isTimedOut() == false) { return RestStatus.OK; } - if (esClusterHealthRequestTimeout200) { + if (return200ForClusterHealthTimeout) { return RestStatus.OK; } else { deprecationLogger.compatibleCritical("cluster_health_request_timeout", CLUSTER_HEALTH_REQUEST_TIMEOUT_DEPRECATION_MSG); @@ -381,17 +379,4 @@ public int hashCode() { return Objects.hash(clusterName, numberOfPendingTasks, numberOfInFlightFetch, delayedUnassignedShards, taskMaxWaitingTime, timedOut, clusterStateHealth, clusterHealthStatus); } - - private static boolean 
readEsClusterHealthRequestTimeout200FromProperty() { - String property = System.getProperty(ES_CLUSTER_HEALTH_REQUEST_TIMEOUT_200_KEY); - if (property == null) { - return false; - } - if (Boolean.parseBoolean(property)) { - return true; - } else { - throw new IllegalArgumentException(ES_CLUSTER_HEALTH_REQUEST_TIMEOUT_200_KEY + " can only be unset or [true] but was [" - + property + "]"); - } - } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 83d4469e3b19d..ee261c253a2e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -30,8 +30,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; @@ -225,7 +225,8 @@ private enum TimeoutState { private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor, TimeoutState timeoutState) { - ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(), + ClusterHealthResponse response = clusterHealth(request, clusterState, + clusterService.getMasterService().numberOfPendingTasks(), allocationService.getNumberOfInFlightFetches(), clusterService.getMasterService().getMaxTaskWaitTime()); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); boolean valid = (readyCounter == waitFor); @@ -324,8 +325,8 @@ static int prepareResponse(final ClusterHealthRequest request, final ClusterHeal } - private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, - int numberOfInFlightFetch, TimeValue pendingTaskTimeInQueue) { + private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, + int numberOfPendingTasks, int numberOfInFlightFetch, TimeValue pendingTaskTimeInQueue) { if (logger.isTraceEnabled()) { logger.trace("Calculating health based on state version [{}]", clusterState.version()); } @@ -337,12 +338,13 @@ private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, Cluste // one of the specified indices is not there - treat it as RED. 
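 // Editorial sketch, not part of this change: the opt-in that used to be the
 // es.cluster_health.request_timeout_200 system property now travels with each request as
 // the return_200_for_cluster_health_timeout query parameter (the name comes from the
 // renamed constant in ClusterHealthResponse above; the REST-layer wiring is not shown in
 // this hunk and is assumed). Over HTTP that would look like:
 //
 //   GET /_cluster/health?wait_for_status=green&timeout=1s&return_200_for_cluster_health_timeout=true
 //
 // The flag reaches this point via request.doesReturn200ForClusterHealthTimeout() and is
 // passed into both ClusterHealthResponse constructors below, which is what lets
 // ClusterHealthResponse.status() answer 200 instead of 408 when the health request
 // times out.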
ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), - pendingTaskTimeInQueue); + pendingTaskTimeInQueue, request.doesReturn200ForClusterHealthTimeout()); response.setStatus(ClusterHealthStatus.RED); return response; } - return new ClusterHealthResponse(clusterState.getClusterName().value(), concreteIndices, clusterState, numberOfPendingTasks, - numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue); + return new ClusterHealthResponse(clusterState.getClusterName().value(), concreteIndices, clusterState, + numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue, + request.doesReturn200ForClusterHealthTimeout()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 0d281ba4b6944..56b068fa2101d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -8,17 +8,20 @@ package org.elasticsearch.action.admin.cluster.migration; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -57,7 +60,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(featureUpgradeStatus); } builder.endArray(); - builder.field("upgrade_status", upgradeStatus); + builder.field("migration_status", upgradeStatus); builder.endObject(); return builder; } @@ -98,9 +101,18 @@ public String toString() { } public enum UpgradeStatus { - UPGRADE_NEEDED, - NO_UPGRADE_NEEDED, - IN_PROGRESS + NO_MIGRATION_NEEDED, + MIGRATION_NEEDED, + IN_PROGRESS, + ERROR; + + public static UpgradeStatus combine(UpgradeStatus... 
statuses) {
+ int statusOrd = 0;
+ for (UpgradeStatus status : statuses) {
+ statusOrd = Math.max(status.ordinal(), statusOrd);
+ }
+ return UpgradeStatus.values()[statusOrd];
+ }
 }
 /**
@@ -111,20 +123,20 @@ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject
 private final String featureName;
 private final Version minimumIndexVersion;
 private final UpgradeStatus upgradeStatus;
- private final List<IndexVersion> indexVersions;
+ private final List<IndexInfo> indexInfos;
 /**
 * @param featureName Name of the feature
 * @param minimumIndexVersion Earliest Elasticsearch version used to create a system index for this feature
 * @param upgradeStatus Whether the feature needs to be upgraded
- * @param indexVersions A list of this feature's concrete indices and the Elasticsearch version that created them
+ * @param indexInfos A list of this feature's concrete indices and the Elasticsearch version that created them
 */
 public FeatureUpgradeStatus(String featureName, Version minimumIndexVersion,
- UpgradeStatus upgradeStatus, List<IndexVersion> indexVersions) {
+ UpgradeStatus upgradeStatus, List<IndexInfo> indexInfos) {
 this.featureName = featureName;
 this.minimumIndexVersion = minimumIndexVersion;
 this.upgradeStatus = upgradeStatus;
- this.indexVersions = indexVersions;
+ this.indexInfos = indexInfos;
 }
 /**
@@ -135,7 +147,7 @@ public FeatureUpgradeStatus(StreamInput in) throws IOException {
 this.featureName = in.readString();
 this.minimumIndexVersion = Version.readVersion(in);
 this.upgradeStatus = in.readEnum(UpgradeStatus.class);
- this.indexVersions = in.readList(IndexVersion::new);
+ this.indexInfos = in.readList(IndexInfo::new);
 }
 public String getFeatureName() {
@@ -150,8 +162,8 @@ public UpgradeStatus getUpgradeStatus() {
 return this.upgradeStatus;
 }
- public List<IndexVersion> getIndexVersions() {
- return this.indexVersions;
+ public List<IndexInfo> getIndexVersions() {
+ return this.indexInfos;
 }
 @Override
@@ -159,7 +171,7 @@ public void writeTo(StreamOutput out) throws IOException {
 out.writeString(this.featureName);
 Version.writeVersion(this.minimumIndexVersion, out);
 out.writeEnum(this.upgradeStatus);
- out.writeList(this.indexVersions);
+ out.writeList(this.indexInfos);
 }
 @Override
@@ -167,9 +179,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 builder.startObject();
 builder.field("feature_name", this.featureName);
 builder.field("minimum_index_version", this.minimumIndexVersion.toString());
- builder.field("upgrade_status", this.upgradeStatus);
+ builder.field("migration_status", this.upgradeStatus);
 builder.startArray("indices");
- for (IndexVersion version : this.indexVersions) {
+ for (IndexInfo version : this.indexInfos) {
 builder.value(version);
 }
 builder.endArray();
@@ -185,12 +197,12 @@ public boolean equals(Object o) {
 return Objects.equals(featureName, that.featureName)
 && Objects.equals(minimumIndexVersion, that.minimumIndexVersion)
 && Objects.equals(upgradeStatus, that.upgradeStatus)
- && Objects.equals(indexVersions, that.indexVersions);
+ && Objects.equals(indexInfos, that.indexInfos);
 }
 @Override
 public int hashCode() {
- return Objects.hash(featureName, minimumIndexVersion, upgradeStatus, indexVersions);
+ return Objects.hash(featureName, minimumIndexVersion, upgradeStatus, indexInfos);
 }
 @Override
@@ -199,7 +211,7 @@ public String toString() {
 "featureName='" + featureName + '\'' +
 ", minimumIndexVersion='" + minimumIndexVersion + '\'' +
 ", upgradeStatus='" + upgradeStatus + '\'' +
- ", indexVersions=" + indexVersions +
+ ", indexInfos=" + indexInfos +
 '}';
 }
 }
@@ -207,26 +219,38 @@
public String toString() { /** * A data class that holds an index name and the version of Elasticsearch with which that index was created */ - public static class IndexVersion implements Writeable, ToXContentObject { + public static class IndexInfo implements Writeable, ToXContentObject { + private static final Map STACK_TRACE_ENABLED_PARAMS = + Map.of(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"); + private final String indexName; private final Version version; + @Nullable private final Exception exception; // Present if this index failed /** * @param indexName Name of the index * @param version Version of Elasticsearch that created the index + * @param exception The exception that this index's migration failed with, if applicable */ - public IndexVersion(String indexName, Version version) { + public IndexInfo(String indexName, Version version, Exception exception) { this.indexName = indexName; this.version = version; + this.exception = exception; } /** * @param in A stream input for a serialized index version object * @throws IOException if we can't deserialize the object */ - public IndexVersion(StreamInput in) throws IOException { + public IndexInfo(StreamInput in) throws IOException { this.indexName = in.readString(); this.version = Version.readVersion(in); + boolean hasException = in.readBoolean(); + if (hasException) { + this.exception = in.readException(); + } else { + this.exception = null; + } } public String getIndexName() { @@ -237,17 +261,36 @@ public Version getVersion() { return this.version; } + public Exception getException() { + return this.exception; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.indexName); Version.writeVersion(this.version, out); + if (exception != null) { + out.writeBoolean(true); + out.writeException(this.exception); + } else { + out.writeBoolean(false); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Params exceptionParams = new DelegatingMapParams(STACK_TRACE_ENABLED_PARAMS, params); + builder.startObject(); builder.field("index", this.indexName); builder.field("version", this.version.toString()); + if (exception != null) { + builder.startObject("failure_cause"); + { + ElasticsearchException.generateFailureXContent(builder, exceptionParams, exception, true); + } + builder.endObject(); + } builder.endObject(); return builder; } @@ -256,7 +299,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - IndexVersion that = (IndexVersion) o; + IndexInfo that = (IndexInfo) o; return indexName.equals(that.indexName) && version.equals(that.version); } @@ -267,9 +310,10 @@ public int hashCode() { @Override public String toString() { - return "IndexVersion{" + + return "IndexInfo{" + "indexName='" + indexName + '\'' + ", version='" + version + '\'' + + ", exception='" + exception.getMessage() + "'" + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index b8777d91437e1..04b1031cd6dc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,25 +19,38 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.upgrades.FeatureMigrationResults; +import org.elasticsearch.upgrades.SingleFeatureMigrationResult; +import org.elasticsearch.upgrades.SystemIndexMigrationTaskState; import java.util.Collection; +import java.util.Comparator; import java.util.List; -import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_UPGRADE_NEEDED; -import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.UPGRADE_NEEDED; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.IN_PROGRESS; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; /** * Transport class for the get feature upgrade status action */ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeAction< - GetFeatureUpgradeStatusRequest, - GetFeatureUpgradeStatusResponse> { + GetFeatureUpgradeStatusRequest, + GetFeatureUpgradeStatusResponse> { + + /** + * This version is only valid for >=8.0.0 and should be changed on backport. 
+ */ + public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0; private final SystemIndices systemIndices; @@ -65,55 +78,91 @@ public TransportGetFeatureUpgradeStatusAction( } @Override - protected void masterOperation(Task task, GetFeatureUpgradeStatusRequest request, ClusterState state, - ActionListener listener) throws Exception { - - List features = systemIndices.getFeatures().entrySet().stream() - .sorted(Map.Entry.comparingByKey()) - .map(entry -> getFeatureUpgradeStatus(state, entry)) + protected void masterOperation( + Task task, + GetFeatureUpgradeStatusRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + + List features = systemIndices.getFeatures() + .values() + .stream() + .sorted(Comparator.comparing(SystemIndices.Feature::getName)) + .map(feature -> getFeatureUpgradeStatus(state, feature)) .collect(Collectors.toList()); - boolean isUpgradeNeeded = features.stream() - .map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getMinimumIndexVersion) - .min(Version::compareTo) - .orElse(Version.CURRENT) - .before(Version.V_7_0_0); + GetFeatureUpgradeStatusResponse.UpgradeStatus status = features.stream() + .map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getUpgradeStatus) + .reduce(GetFeatureUpgradeStatusResponse.UpgradeStatus::combine) + .orElseGet(() -> { + assert false : "get feature statuses API doesn't have any features"; + return NO_MIGRATION_NEEDED; + }); - listener.onResponse(new GetFeatureUpgradeStatusResponse(features, isUpgradeNeeded ? UPGRADE_NEEDED : NO_UPGRADE_NEEDED)); + listener.onResponse(new GetFeatureUpgradeStatusResponse(features, status)); } - // visible for testing static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeStatus( - ClusterState state, Map.Entry entry) { + ClusterState state, + SystemIndices.Feature feature + ) { + String featureName = feature.getName(); - String featureName = entry.getKey(); - SystemIndices.Feature feature = entry.getValue(); + final String currentFeature = Optional.ofNullable( + state.metadata().custom(PersistentTasksCustomMetadata.TYPE) + ) + .map(tasksMetdata -> tasksMetdata.getTask(SYSTEM_INDEX_UPGRADE_TASK_NAME)) + .map(task -> task.getState()) + .map(taskState -> ((SystemIndexMigrationTaskState) taskState).getCurrentFeature()) + .orElse(null); - List indexVersions = getIndexVersions(state, feature); + List indexInfos = getIndexInfos(state, feature); - Version minimumVersion = indexVersions.stream() - .map(GetFeatureUpgradeStatusResponse.IndexVersion::getVersion) + Version minimumVersion = indexInfos.stream() + .map(GetFeatureUpgradeStatusResponse.IndexInfo::getVersion) .min(Version::compareTo) .orElse(Version.CURRENT); - - return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus( - featureName, - minimumVersion, - minimumVersion.before(Version.V_7_0_0) ? 
UPGRADE_NEEDED : NO_UPGRADE_NEEDED, - indexVersions - ); + GetFeatureUpgradeStatusResponse.UpgradeStatus initialStatus; + if (featureName.equals(currentFeature)) { + initialStatus = IN_PROGRESS; + } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_VERSION)) { + initialStatus = MIGRATION_NEEDED; + } else { + initialStatus = NO_MIGRATION_NEEDED; + } + + GetFeatureUpgradeStatusResponse.UpgradeStatus status = indexInfos.stream() + .filter(idxInfo -> idxInfo.getException() != null) + .findFirst() + .map(idxInfo -> ERROR) + .map(idxStatus -> GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(idxStatus, initialStatus)) + .orElse(initialStatus); + + return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus(featureName, minimumVersion, status, indexInfos); } // visible for testing - static List getIndexVersions(ClusterState state, SystemIndices.Feature feature) { + static List getIndexInfos(ClusterState state, SystemIndices.Feature feature) { + final SingleFeatureMigrationResult featureStatus = Optional.ofNullable( + (FeatureMigrationResults) state.metadata().custom(FeatureMigrationResults.TYPE) + ).map(FeatureMigrationResults::getFeatureStatuses).map(results -> results.get(feature.getName())).orElse(null); + + final String failedFeatureName = featureStatus == null ? null : featureStatus.getFailedIndexName(); + final Exception exception = featureStatus == null ? null : featureStatus.getException(); + return Stream.of(feature.getIndexDescriptors(), feature.getAssociatedIndexDescriptors()) .flatMap(Collection::stream) .flatMap(descriptor -> descriptor.getMatchingIndices(state.metadata()).stream()) .sorted(String::compareTo) .map(index -> state.metadata().index(index)) - .map(indexMetadata -> new GetFeatureUpgradeStatusResponse.IndexVersion( - indexMetadata.getIndex().getName(), - indexMetadata.getCreationVersion())) + .map( + indexMetadata -> new GetFeatureUpgradeStatusResponse.IndexInfo( + indexMetadata.getIndex().getName(), + indexMetadata.getCreationVersion(), + indexMetadata.getIndex().getName().equals(failedFeatureName) ? 
exception : null + ) + ) .collect(Collectors.toList()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java index 3ede3d4c4b463..b7ea3d4c7f817 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java @@ -8,6 +8,9 @@ package org.elasticsearch.action.admin.cluster.migration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -18,12 +21,18 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.upgrades.SystemIndexMigrationTaskParams; -import java.util.ArrayList; +import java.util.Comparator; import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.getFeatureUpgradeStatus; +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; /** * Transport action for post feature upgrade action @@ -31,8 +40,10 @@ public class TransportPostFeatureUpgradeAction extends TransportMasterNodeAction< PostFeatureUpgradeRequest, PostFeatureUpgradeResponse> { + private static final Logger logger = LogManager.getLogger(TransportPostFeatureUpgradeAction.class); private final SystemIndices systemIndices; + private final PersistentTasksService persistentTasksService; @Inject public TransportPostFeatureUpgradeAction( @@ -41,7 +52,8 @@ public TransportPostFeatureUpgradeAction( ActionFilters actionFilters, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SystemIndices systemIndices + SystemIndices systemIndices, + PersistentTasksService persistentTasksService ) { super( PostFeatureUpgradeAction.NAME, @@ -55,20 +67,46 @@ public TransportPostFeatureUpgradeAction( ThreadPool.Names.SAME ); this.systemIndices = systemIndices; + this.persistentTasksService = persistentTasksService; } @Override - protected void masterOperation(Task task, PostFeatureUpgradeRequest request, ClusterState state, - ActionListener listener) throws Exception { - List features = new ArrayList<>(); - features.add(new PostFeatureUpgradeResponse.Feature("security")); - listener.onResponse(new PostFeatureUpgradeResponse( - // TODO: implement operation for this action - true, features, null, null)); + protected void masterOperation( + Task task, + PostFeatureUpgradeRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + List featuresToMigrate = systemIndices.getFeatures() + .values() + .stream() + .map(feature -> getFeatureUpgradeStatus(state, feature)) + .filter(status -> status.getUpgradeStatus().equals(GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED)) + 
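
The status plumbing above reduces the per-feature statuses into one response-level value via UpgradeStatus::combine, and the post-upgrade action reuses getFeatureUpgradeStatus to select only features still reporting MIGRATION_NEEDED. A minimal sketch of a severity-ordered combine, assuming the enum constants are declared from least to most severe (the actual implementation in GetFeatureUpgradeStatusResponse may differ in detail):

    enum UpgradeStatus {
        NO_MIGRATION_NEEDED, MIGRATION_NEEDED, IN_PROGRESS, ERROR;

        // The more severe (higher-ordinal) status wins, so a single errored
        // feature marks the whole response as ERROR.
        static UpgradeStatus combine(UpgradeStatus a, UpgradeStatus b) {
            return a.ordinal() >= b.ordinal() ? a : b;
        }
    }

Under that assumption, reducing a stream of per-feature statuses with UpgradeStatus::combine degrades monotonically from NO_MIGRATION_NEEDED toward ERROR, which is exactly what the reduce call above relies on.
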
.map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getFeatureName) + .map(PostFeatureUpgradeResponse.Feature::new) + .sorted(Comparator.comparing(PostFeatureUpgradeResponse.Feature::getFeatureName)) // consistent ordering to simplify testing + .collect(Collectors.toList()); + + if (featuresToMigrate.isEmpty() == false) { + persistentTasksService.sendStartRequest( + SYSTEM_INDEX_UPGRADE_TASK_NAME, + SYSTEM_INDEX_UPGRADE_TASK_NAME, + new SystemIndexMigrationTaskParams(), + ActionListener.wrap(startedTask -> { + listener.onResponse(new PostFeatureUpgradeResponse(true, featuresToMigrate, null, null)); + }, ex -> { + logger.error("failed to start system index upgrade task", ex); + + listener.onResponse(new PostFeatureUpgradeResponse(false, null, null, new ElasticsearchException(ex))); + }) + ); + } else { + listener.onResponse(new PostFeatureUpgradeResponse(false, null, "No system indices require migration", null)); + } } @Override protected ClusterBlockException checkBlock(PostFeatureUpgradeRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 2be13918c797e..3d4b690c5bba7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; +import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; @@ -22,6 +23,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest currentSnapshots(SnapshotsInProgress snapshots Collections.emptyList() ); for (SnapshotsInProgress.Entry entry : entries) { - snapshotList.add(new SnapshotInfo(entry)); + snapshotList.add(SnapshotInfo.inProgress(entry)); } return snapshotList; } @@ -312,10 +312,9 @@ private void loadSnapshotInfos( return; } - final BiPredicate preflightPredicate = predicates.preflightPredicate(); if (repositoryData != null) { for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - if (preflightPredicate == null || preflightPredicate.test(snapshotId, repositoryData)) { + if (predicates.test(snapshotId, repositoryData)) { allSnapshotIds.put(snapshotId.getName(), new Snapshot(repo, snapshotId)); } } @@ -380,11 +379,11 @@ private void loadSnapshotInfos( sortBy, after, order, - predicates.snapshotPredicate(), + predicates, listener ); } else { - assert predicates.snapshotPredicate() == null : "filtering is not supported in non-verbose mode"; + assert predicates.isMatchAll() : "filtering is not supported in non-verbose mode"; final SnapshotsInRepo snapshotInfos; if (repositoryData != null) { // want non-current snapshots as well, which are found in the repository data @@ -420,7 +419,7 @@ private void snapshots( GetSnapshotsRequest.SortBy sortBy, @Nullable GetSnapshotsRequest.After after, SortOrder order, - @Nullable Predicate predicate, + SnapshotPredicates predicate, ActionListener listener ) { if (task.notifyIfCancelled(listener)) { @@ -436,9 +435,9 @@ private void 
snapshots( ); for (SnapshotsInProgress.Entry entry : entries) { if (snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId())) { - final SnapshotInfo snapshotInfo = new SnapshotInfo(entry); - if (predicate == null || predicate.test(snapshotInfo)) { - snapshotSet.add(new SnapshotInfo(entry)); + final SnapshotInfo snapshotInfo = SnapshotInfo.inProgress(entry); + if (predicate.test(snapshotInfo)) { + snapshotSet.add(SnapshotInfo.inProgress(entry)); } } } @@ -466,17 +465,11 @@ private void snapshots( return; } repository.getSnapshotInfo( - new GetSnapshotInfoContext( - snapshotIdsToIterate, - ignoreUnavailable == false, - task::isCancelled, - predicate == null ? (context, snapshotInfo) -> snapshotInfos.add(snapshotInfo) : (context, snapshotInfo) -> { - if (predicate.test(snapshotInfo)) { - snapshotInfos.add(snapshotInfo); - } - }, - allDoneListener - ) + new GetSnapshotInfoContext(snapshotIdsToIterate, ignoreUnavailable == false, task::isCancelled, (context, snapshotInfo) -> { + if (predicate.test(snapshotInfo)) { + snapshotInfos.add(snapshotInfo); + } + }, allDoneListener) ); } @@ -666,49 +659,6 @@ private static Predicate buildAfterPredicate( } } - private static Predicate filterBySLMPolicies(String[] slmPolicies) { - final List includePatterns = new ArrayList<>(); - final List excludePatterns = new ArrayList<>(); - boolean seenWildcard = false; - boolean matchNoPolicy = false; - for (String slmPolicy : slmPolicies) { - if (seenWildcard && slmPolicy.length() > 1 && slmPolicy.startsWith("-")) { - excludePatterns.add(slmPolicy.substring(1)); - } else { - if (Regex.isSimpleMatchPattern(slmPolicy)) { - seenWildcard = true; - } else if (GetSnapshotsRequest.NO_POLICY_PATTERN.equals(slmPolicy)) { - matchNoPolicy = true; - } - includePatterns.add(slmPolicy); - } - } - final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); - final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); - final boolean matchWithoutPolicy = matchNoPolicy; - return snapshotInfo -> { - final Map metadata = snapshotInfo.userMetadata(); - final String policy; - if (metadata == null) { - policy = null; - } else { - final Object policyFound = metadata.get(SnapshotsService.POLICY_ID_METADATA_FIELD); - policy = policyFound instanceof String ? (String) policyFound : null; - } - if (policy == null) { - return matchWithoutPolicy; - } - if (Regex.simpleMatch(includes, policy) == false) { - return false; - } - return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; - }; - } - - private static Predicate filterByLongOffset(ToLongFunction extractor, long after, SortOrder order) { - return order == SortOrder.ASC ? info -> after <= extractor.applyAsLong(info) : info -> after >= extractor.applyAsLong(info); - } - private static Predicate filterByLongOffset( ToLongFunction extractor, long after, @@ -742,102 +692,181 @@ private static int compareName(String name, String repoName, SnapshotInfo info) } /** - * A pair of predicates for the get snapshots action. The {@link #preflightPredicate()} is applied to combinations of snapshot id and - * repository data to determine which snapshots to fully load from the repository and rules out all snapshots that do not match the - * given {@link GetSnapshotsRequest} that can be ruled out through the information in {@link RepositoryData}. 
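
The class reworked in the next hunk splits snapshot filtering into a cheap preflight test against repository metadata and a final test against the fully loaded SnapshotInfo. The same shape in generic form (hypothetical type parameters; as in the real class, a null predicate means match-all):

    import java.util.function.BiPredicate;
    import java.util.function.Predicate;

    record TwoPhasePredicate<ID, META, LOADED>(
        BiPredicate<ID, META> preflight, // cheap: repository metadata only
        Predicate<LOADED> onLoaded       // applied after the expensive load
    ) {
        boolean testPreflight(ID id, META meta) {
            return preflight == null || preflight.test(id, meta);
        }

        boolean testLoaded(LOADED loaded) {
            return onLoaded == null || onLoaded.test(loaded);
        }
    }

The preflight stage rules out as many candidates as possible before any SnapshotInfo is fetched, which is the whole point of keeping the two predicates separate.
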
- * The predicate returned by {@link #snapshotPredicate()} is then applied the instances of {@link SnapshotInfo} that were loaded from - * the repository to filter out those remaining that did not match the request but could not be ruled out without loading their - * {@link SnapshotInfo}. + * A pair of predicates for the get snapshots action. The {@link #test(SnapshotId, RepositoryData)} predicate is applied to combinations + * of snapshot id and repository data to determine which snapshots to fully load from the repository and rules out all snapshots that do + * not match the given {@link GetSnapshotsRequest} that can be ruled out through the information in {@link RepositoryData}. + * The {@link #test(SnapshotInfo)} predicate is then applied to the instances of {@link SnapshotInfo} that were + * loaded from the repository to filter out those remaining that did not match the request but could not be ruled out without loading + * their {@link SnapshotInfo}. */ private static final class SnapshotPredicates { - private final Predicate<SnapshotInfo> snapshotPredicate; + private static final SnapshotPredicates MATCH_ALL = new SnapshotPredicates(null, null); + @Nullable // if all snapshot IDs match private final BiPredicate<SnapshotId, RepositoryData> preflightPredicate; - SnapshotPredicates(GetSnapshotsRequest request) { - Predicate<SnapshotInfo> snapshotPredicate = null; - final String[] slmPolicies = request.policies(); - final String fromSortValue = request.fromSortValue(); - if (slmPolicies.length > 0) { - snapshotPredicate = filterBySLMPolicies(slmPolicies); + @Nullable // if all snapshots match + private final Predicate<SnapshotInfo> snapshotPredicate; + + private SnapshotPredicates( + @Nullable BiPredicate<SnapshotId, RepositoryData> preflightPredicate, + @Nullable Predicate<SnapshotInfo> snapshotPredicate + ) { + this.snapshotPredicate = snapshotPredicate; + this.preflightPredicate = preflightPredicate; + } + + boolean test(SnapshotId snapshotId, RepositoryData repositoryData) { + return preflightPredicate == null || preflightPredicate.test(snapshotId, repositoryData); + } + + boolean isMatchAll() { + return snapshotPredicate == null; + } + + boolean test(SnapshotInfo snapshotInfo) { + return snapshotPredicate == null || snapshotPredicate.test(snapshotInfo); + } + + private SnapshotPredicates and(SnapshotPredicates other) { + return this == MATCH_ALL ? other + : other == MATCH_ALL ? this + : new SnapshotPredicates( + preflightPredicate == null ? other.preflightPredicate + : other.preflightPredicate == null ? preflightPredicate + : preflightPredicate.and(other.preflightPredicate), + snapshotPredicate == null ? other.snapshotPredicate + : other.snapshotPredicate == null ? snapshotPredicate + : snapshotPredicate.and(other.snapshotPredicate) + ); + } + + static SnapshotPredicates fromRequest(GetSnapshotsRequest request) { + return getSortValuePredicate(request.fromSortValue(), request.sort(), request.order()).and( + getSlmPredicates(request.policies()) + ); + } + + private static SnapshotPredicates getSlmPredicates(String[] slmPolicies) { + if (slmPolicies.length == 0) { + return MATCH_ALL; } - final GetSnapshotsRequest.SortBy sortBy = request.sort(); - final SortOrder order = request.order(); - if (fromSortValue == null) { - preflightPredicate = null; - } else { - final Predicate<SnapshotInfo> fromSortValuePredicate; - switch (sortBy) { - case START_TIME: - final long after = Long.parseLong(fromSortValue); - preflightPredicate = order == SortOrder.ASC ?
(snapshotId, repositoryData) -> { - final long startTime = getStartTime(snapshotId, repositoryData); - return startTime == -1 || after <= startTime; - } : (snapshotId, repositoryData) -> { - final long startTime = getStartTime(snapshotId, repositoryData); - return startTime == -1 || after >= startTime; - }; - fromSortValuePredicate = filterByLongOffset(SnapshotInfo::startTime, after, order); - break; - case NAME: - preflightPredicate = order == SortOrder.ASC - ? (snapshotId, repositoryData) -> fromSortValue.compareTo(snapshotId.getName()) <= 0 - : (snapshotId, repositoryData) -> fromSortValue.compareTo(snapshotId.getName()) >= 0; - fromSortValuePredicate = null; - break; - case DURATION: - final long afterDuration = Long.parseLong(fromSortValue); - preflightPredicate = order == SortOrder.ASC ? (snapshotId, repositoryData) -> { - final long duration = getDuration(snapshotId, repositoryData); - return duration == -1 || afterDuration <= duration; - } : (snapshotId, repositoryData) -> { - final long duration = getDuration(snapshotId, repositoryData); - return duration == -1 || afterDuration >= duration; - }; - fromSortValuePredicate = filterByLongOffset(info -> info.endTime() - info.startTime(), afterDuration, order); - break; - case INDICES: - final int afterIndexCount = Integer.parseInt(fromSortValue); - preflightPredicate = order == SortOrder.ASC - ? (snapshotId, repositoryData) -> afterIndexCount <= indexCount(snapshotId, repositoryData) - : (snapshotId, repositoryData) -> afterIndexCount >= indexCount(snapshotId, repositoryData); - fromSortValuePredicate = null; - break; - case REPOSITORY: - // already handled in #maybeFilterRepositories - preflightPredicate = null; - fromSortValuePredicate = null; - break; - case SHARDS: - preflightPredicate = null; - fromSortValuePredicate = filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(fromSortValue), order); - break; - case FAILED_SHARDS: - preflightPredicate = null; - fromSortValuePredicate = filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(fromSortValue), order); - break; - default: - throw new AssertionError("unexpected sort column [" + sortBy + "]"); - } - if (snapshotPredicate == null) { - snapshotPredicate = fromSortValuePredicate; - } else if (fromSortValuePredicate != null) { - snapshotPredicate = fromSortValuePredicate.and(snapshotPredicate); + final List includePatterns = new ArrayList<>(); + final List excludePatterns = new ArrayList<>(); + boolean seenWildcard = false; + boolean matchNoPolicy = false; + for (String slmPolicy : slmPolicies) { + if (seenWildcard && slmPolicy.length() > 1 && slmPolicy.startsWith("-")) { + excludePatterns.add(slmPolicy.substring(1)); + } else { + if (Regex.isSimpleMatchPattern(slmPolicy)) { + seenWildcard = true; + } else if (GetSnapshotsRequest.NO_POLICY_PATTERN.equals(slmPolicy)) { + matchNoPolicy = true; + } + includePatterns.add(slmPolicy); } } - this.snapshotPredicate = snapshotPredicate; + final String[] includes = includePatterns.toArray(Strings.EMPTY_ARRAY); + final String[] excludes = excludePatterns.toArray(Strings.EMPTY_ARRAY); + final boolean matchWithoutPolicy = matchNoPolicy; + return new SnapshotPredicates(((snapshotId, repositoryData) -> { + final RepositoryData.SnapshotDetails details = repositoryData.getSnapshotDetails(snapshotId); + final String policy; + if (details == null || (details.getSlmPolicy() == null)) { + // no SLM policy recorded + return true; + } else { + final String policyFound = details.getSlmPolicy(); + // empty string means that snapshot was 
not created by an SLM policy + policy = policyFound.isEmpty() ? null : policyFound; + } + return matchPolicy(includes, excludes, matchWithoutPolicy, policy); + }), snapshotInfo -> { + final Map metadata = snapshotInfo.userMetadata(); + final String policy; + if (metadata == null) { + policy = null; + } else { + final Object policyFound = metadata.get(SnapshotsService.POLICY_ID_METADATA_FIELD); + policy = policyFound instanceof String ? (String) policyFound : null; + } + return matchPolicy(includes, excludes, matchWithoutPolicy, policy); + }); + } + + private static boolean matchPolicy(String[] includes, String[] excludes, boolean matchWithoutPolicy, @Nullable String policy) { + if (policy == null) { + return matchWithoutPolicy; + } + if (Regex.simpleMatch(includes, policy) == false) { + return false; + } + return excludes.length == 0 || Regex.simpleMatch(excludes, policy) == false; } - @Nullable - public Predicate snapshotPredicate() { - return snapshotPredicate; + private static SnapshotPredicates getSortValuePredicate(String fromSortValue, GetSnapshotsRequest.SortBy sortBy, SortOrder order) { + if (fromSortValue == null) { + return MATCH_ALL; + } + + switch (sortBy) { + case START_TIME: + final long after = Long.parseLong(fromSortValue); + return new SnapshotPredicates(order == SortOrder.ASC ? (snapshotId, repositoryData) -> { + final long startTime = getStartTime(snapshotId, repositoryData); + return startTime == -1 || after <= startTime; + } : (snapshotId, repositoryData) -> { + final long startTime = getStartTime(snapshotId, repositoryData); + return startTime == -1 || after >= startTime; + }, filterByLongOffset(SnapshotInfo::startTime, after, order)); + + case NAME: + return new SnapshotPredicates( + order == SortOrder.ASC + ? (snapshotId, repositoryData) -> fromSortValue.compareTo(snapshotId.getName()) <= 0 + : (snapshotId, repositoryData) -> fromSortValue.compareTo(snapshotId.getName()) >= 0, + null + ); + + case DURATION: + final long afterDuration = Long.parseLong(fromSortValue); + return new SnapshotPredicates(order == SortOrder.ASC ? (snapshotId, repositoryData) -> { + final long duration = getDuration(snapshotId, repositoryData); + return duration == -1 || afterDuration <= duration; + } : (snapshotId, repositoryData) -> { + final long duration = getDuration(snapshotId, repositoryData); + return duration == -1 || afterDuration >= duration; + }, filterByLongOffset(info -> info.endTime() - info.startTime(), afterDuration, order)); + + case INDICES: + final int afterIndexCount = Integer.parseInt(fromSortValue); + return new SnapshotPredicates( + order == SortOrder.ASC + ? (snapshotId, repositoryData) -> afterIndexCount <= indexCount(snapshotId, repositoryData) + : (snapshotId, repositoryData) -> afterIndexCount >= indexCount(snapshotId, repositoryData), + null + ); + + case REPOSITORY: + // already handled in #maybeFilterRepositories + return MATCH_ALL; + + case SHARDS: + return new SnapshotPredicates( + null, + filterByLongOffset(SnapshotInfo::totalShards, Integer.parseInt(fromSortValue), order) + ); + case FAILED_SHARDS: + return new SnapshotPredicates( + null, + filterByLongOffset(SnapshotInfo::failedShards, Integer.parseInt(fromSortValue), order) + ); + default: + throw new AssertionError("unexpected sort column [" + sortBy + "]"); + } } - @Nullable - public BiPredicate preflightPredicate() { - return preflightPredicate; + private static Predicate filterByLongOffset(ToLongFunction extractor, long after, SortOrder order) { + return order == SortOrder.ASC ? 
info -> after <= extractor.applyAsLong(info) : info -> after >= extractor.applyAsLong(info); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index 6ba80a4351efc..d676f7664583d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -59,7 +59,8 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { Set<String> indexRuntimeFieldTypes = new HashSet<>(); MappingMetadata mappingMetadata = indexMetadata.mapping(); if (mappingMetadata != null) { - MappingVisitor.visitMapping(mappingMetadata.getSourceAsMap(), (field, fieldMapping) -> { + final Map<String, Object> map = mappingMetadata.getSourceAsMap(); + MappingVisitor.visitMapping(map, (field, fieldMapping) -> { concreteFieldNames.add(field); String type = null; Object typeO = fieldMapping.get("type"); @@ -88,7 +89,7 @@ public static MappingStats of(Metadata metadata, Runnable ensureNotCancelled) { } }); - MappingVisitor.visitRuntimeMapping(mappingMetadata.getSourceAsMap(), (field, fieldMapping) -> { + MappingVisitor.visitRuntimeMapping(map, (field, fieldMapping) -> { Object typeObject = fieldMapping.get("type"); if (typeObject == null) { return; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java index 4ba20c1c0a90a..52cef7c6e9da6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java @@ -19,31 +19,34 @@ public static void visitMapping(Map<String, Object> mapping, BiConsumer<String, Map<String, Object>> fieldMappingConsumer) { - private static void visitMapping(Map<String, Object> mapping, String path, BiConsumer<String, Map<String, Object>> fieldMappingConsumer) { + private static void visitMapping(final Map<String, Object> mapping, + final String path, + final BiConsumer<String, Map<String, Object>> fieldMappingConsumer) { Object properties = mapping.get("properties"); if (properties instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> propertiesAsMap = (Map<String, Object>) properties; - for (String field : propertiesAsMap.keySet()) { - Object v = propertiesAsMap.get(field); + for (Map.Entry<String, Object> entry : propertiesAsMap.entrySet()) { + final Object v = entry.getValue(); if (v instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> fieldMapping = (Map<String, Object>) v; - fieldMappingConsumer.accept(path + field, fieldMapping); - visitMapping(fieldMapping, path + field + ".", fieldMappingConsumer); + final String prefix = path + entry.getKey(); + fieldMappingConsumer.accept(prefix, fieldMapping); + visitMapping(fieldMapping, prefix + ".", fieldMappingConsumer); // Multi fields Object fieldsO = fieldMapping.get("fields"); if (fieldsO instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> fields = (Map<String, Object>) fieldsO; - for (String subfield : fields.keySet()) { - Object v2 = fields.get(subfield); + for (Map.Entry<String, Object> subfieldEntry : fields.entrySet()) { + Object v2 = subfieldEntry.getValue(); if (v2 instanceof Map) { @SuppressWarnings("unchecked") Map<String, Object> fieldMapping2 = (Map<String, Object>) v2; - fieldMappingConsumer.accept(path + field + "." + subfield, fieldMapping2); + fieldMappingConsumer.accept(prefix + "." + subfieldEntry.getKey(), fieldMapping2); } } } @@ -59,14 +62,14 @@ public static void visitRuntimeMapping(Map<String, Object> mapping, BiConsumer<String, Map<String, Object>> runtimeFieldMappingConsumer) { Object runtimeObject = mapping.get("runtime"); if (runtimeObject instanceof Map == false) { return; } @SuppressWarnings("unchecked") Map<String, Object> runtimeMappings = (Map<String, Object>) runtimeObject; - for (String runtimeFieldName : runtimeMappings.keySet()) { - Object runtimeFieldMappingObject = runtimeMappings.get(runtimeFieldName); + for (Map.Entry<String, Object> entry : runtimeMappings.entrySet()) { + final Object runtimeFieldMappingObject = entry.getValue(); if (runtimeFieldMappingObject instanceof Map == false) { continue; } @SuppressWarnings("unchecked") Map<String, Object> runtimeFieldMapping = (Map<String, Object>) runtimeFieldMappingObject; - runtimeFieldMappingConsumer.accept(runtimeFieldName, runtimeFieldMapping); + runtimeFieldMappingConsumer.accept(entry.getKey(), runtimeFieldMapping); } } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index 1172984edcd1f..ab0fa8acc5e17 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -164,8 +165,7 @@ private NameResolution resolveAliasRolloverNames(Metadata metadata, IndexAbstrac private NameResolution resolveDataStreamRolloverNames(Metadata metadata, IndexAbstraction.DataStream dataStream) { final DataStream ds = dataStream.getDataStream(); final IndexMetadata originalWriteIndex = metadata.index(dataStream.getWriteIndex()); - final DataStream rolledDataStream = ds.rollover(metadata, "uuid"); - return new NameResolution(originalWriteIndex.getIndex().getName(), null, rolledDataStream.getWriteIndex().getName()); + return new NameResolution(originalWriteIndex.getIndex().getName(), null, ds.nextWriteIndexAndGeneration(metadata).v1()); } private RolloverResult rolloverAlias(ClusterState currentState, IndexAbstraction.Alias alias, String aliasName, @@ -230,20 +230,22 @@ private RolloverResult rolloverDataStream(ClusterState currentState, IndexAbstra final DataStream ds = dataStream.getDataStream(); final Index originalWriteIndex = dataStream.getWriteIndex(); - DataStream rolledDataStream = ds.rollover(currentState.metadata(), "uuid"); - createIndexService.validateIndexName(rolledDataStream.getWriteIndex().getName(), currentState); // fails if the index already exists + final Tuple<String, Long> nextIndexAndGeneration = ds.nextWriteIndexAndGeneration(currentState.metadata()); + final String newWriteIndexName = nextIndexAndGeneration.v1(); + final long newGeneration = nextIndexAndGeneration.v2(); + createIndexService.validateIndexName(newWriteIndexName, currentState); // fails if the index already exists if (onlyValidate) { - return new RolloverResult(rolledDataStream.getWriteIndex().getName(), originalWriteIndex.getName(), currentState); + return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), currentState); } CreateIndexClusterStateUpdateRequest createIndexClusterStateRequest = prepareDataStreamCreateIndexRequest( dataStreamName, - rolledDataStream.getWriteIndex().getName(), + newWriteIndexName, createIndexRequest, systemDataStreamDescriptor );
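
The rollover refactor in this hunk (continuing below) computes the next write index name and generation once, as a Tuple, instead of building a throwaway rolled-over DataStream just to read its write index name. A sketch of the idea, assuming a ".ds-<name>-<generation>" style naming pattern (the real scheme also embeds a date and zero-pads the generation):

    import java.util.Locale;
    import java.util.Map;

    final class NextWriteIndex {
        // Stands in for Tuple<String, Long>: both the name and the generation
        // are derived once and then reused by validation and index creation.
        static Map.Entry<String, Long> nextWriteIndexAndGeneration(String dataStream, long generation) {
            long nextGen = generation + 1;
            String name = String.format(Locale.ROOT, ".ds-%s-%06d", dataStream, nextGen);
            return Map.entry(name, nextGen);
        }
    }

Computing the pair up front keeps validateIndexName, the dry-run branch, and the actual create request agreeing on the same name.
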
ClusterState newState = createIndexService.applyCreateIndexRequest(currentState, createIndexClusterStateRequest, silent, - (builder, indexMetadata) -> builder.put(ds.rollover(currentState.metadata(), indexMetadata.getIndexUUID()))); + (builder, indexMetadata) -> builder.put(ds.rollover(indexMetadata.getIndex(), newGeneration))); RolloverInfo rolloverInfo = new RolloverInfo(dataStreamName, metConditions, threadPool.absoluteTimeInMillis()); newState = ClusterState.builder(newState) @@ -252,7 +254,7 @@ private RolloverResult rolloverDataStream(ClusterState currentState, IndexAbstra .putRolloverInfo(rolloverInfo))) .build(); - return new RolloverResult(rolledDataStream.getWriteIndex().getName(), originalWriteIndex.getName(), newState); + return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } static String generateRolloverIndexName(String sourceIndexName, IndexNameExpressionResolver indexNameExpressionResolver) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 810b00e78e16b..eb527e93655c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.settings.put; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -204,12 +205,25 @@ public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOExcept @SuppressWarnings("unchecked") Map innerBodySettingsMap = (Map) innerBodySettings; settings.putAll(innerBodySettingsMap); + checkMixedRequest(bodySettings); } else { settings.putAll(bodySettings); } return this.settings(settings); } + /** + * Checks if the request is a "mixed request". A mixed request contains both a + * "settings" map and "other" properties. Detection of a mixed request + * will result in a parse exception being thrown. 
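
In other words, a request body may either nest everything under a "settings" map or use only top-level properties; combining both forms is rejected. A standalone sketch of the rule, assuming the parsed body is available as a Map<String, Object> (the method below is the real implementation):

    import java.util.Map;

    final class MixedBodyCheck {
        static void rejectMixedBody(Map<String, Object> body) {
            // rejected shape: { "settings": { ... }, "index.refresh_interval": "5s" }
            if (body.containsKey("settings") && body.size() > 1) {
                throw new IllegalArgumentException("mix of settings map and top-level properties");
            }
        }
    }
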
+ */ + private static void checkMixedRequest(Map bodySettings) { + assert bodySettings.containsKey("settings"); + if (bodySettings.size() > 1) { + throw new ElasticsearchParseException("mix of settings map and top-level properties"); + } + } + @Override public String toString() { return "indices : " + Arrays.toString(indices) + "," + Strings.toString(this); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 0f54322a25b8c..8c57a70981619 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -191,7 +191,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re try { ParsedQuery parsedQuery = searchContext.getSearchExecutionContext().toQuery(request.query()); searchContext.parsedQuery(parsedQuery); - searchContext.preProcess(request.rewrite()); + searchContext.preProcess(); valid = true; explanation = explain(searchContext, request.rewrite()); } catch (QueryShardException|ParsingException e) { @@ -208,7 +208,7 @@ protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest re } private String explain(SearchContext context, boolean rewritten) { - Query query = context.query(); + Query query = rewritten ? context.rewrittenQuery() : context.query(); if (rewritten && query instanceof MatchNoDocsQuery) { return context.parsedQuery().query().toString(); } else { diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java new file mode 100644 index 0000000000000..15a7aaf73f3d2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.datastreams; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.metadata.DataStreamAction; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class ModifyDataStreamsAction extends ActionType { + + public static final ModifyDataStreamsAction INSTANCE = new ModifyDataStreamsAction(); + public static final String NAME = "indices:admin/data_stream/modify"; + + private ModifyDataStreamsAction() { + super(NAME, AcknowledgedResponse::readFrom); + } + + public static final class Request + extends AcknowledgedRequest + implements IndicesRequest, ToXContentObject { + + // relevant only for authorizing the request, so require every specified + // index to exist, expand wildcards only to open indices, prohibit + // wildcard expressions that resolve to zero indices, and do not attempt + // to resolve expressions as aliases + private static final IndicesOptions INDICES_OPTIONS = + IndicesOptions.fromOptions(false, false, true, false, true, false, true, false); + + private final List actions; + + public Request(StreamInput in) throws IOException { + super(in); + actions = in.readList(DataStreamAction::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(actions); + } + + public Request(List actions) { + this.actions = Collections.unmodifiableList(actions); + } + + public List getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("actions"); + for (DataStreamAction action : actions) { + action.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + if (actions.isEmpty()) { + return addValidationError("must specify at least one data stream modification action", null); + } + return null; + } + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "data_stream_actions", + args -> new Request(((List) args[0])) + ); + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), DataStreamAction.PARSER, new ParseField("actions")); + } + + @Override + public String[] indices() { + return actions.stream().map(DataStreamAction::getDataStream).toArray(String[]::new); + } + + @Override + public IndicesOptions indicesOptions() { + return INDICES_OPTIONS; + } + + @Override + public boolean includeDataStreams() { + return true; + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != getClass()) { + return 
false; + } + Request other = (Request) obj; + return Arrays.equals(actions.toArray(), other.actions.toArray()); + } + + @Override + public int hashCode() { + return Objects.hash(actions); + } + + } +} diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsTransportAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsTransportAction.java new file mode 100644 index 0000000000000..cb174e0f71a0d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsTransportAction.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.datastreams; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +public class ModifyDataStreamsTransportAction extends AcknowledgedTransportMasterNodeAction< + ModifyDataStreamsAction.Request> { + + private final MetadataDataStreamsService metadataDataStreamsService; + + @Inject + public ModifyDataStreamsTransportAction( + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + MetadataDataStreamsService metadataDataStreamsService + ) { + super( + ModifyDataStreamsAction.NAME, + transportService, + clusterService, + threadPool, + actionFilters, + ModifyDataStreamsAction.Request::new, + indexNameExpressionResolver, + ThreadPool.Names.SAME + ); + this.metadataDataStreamsService = metadataDataStreamsService; + } + + @Override + protected void masterOperation( + Task task, + ModifyDataStreamsAction.Request request, + ClusterState state, + ActionListener listener + ) throws Exception { + metadataDataStreamsService.modifyDataStream(request, listener); + } + + @Override + protected ClusterBlockException checkBlock(ModifyDataStreamsAction.Request request, ClusterState state) { + ClusterBlockException globalBlock = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + if (globalBlock != null) { + return globalBlock; + } + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, + indexNameExpressionResolver.concreteIndexNames(state, request)); + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index ec577b747b9a1..caabdd1754d01 100644 --- 
a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -104,9 +104,9 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId return new ExplainResponse(shardId.getIndexName(), request.id(), false); } context.parsedQuery(context.getSearchExecutionContext().toQuery(request.query())); - context.preProcess(true); + context.preProcess(); int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase; - Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); + Explanation explanation = context.searcher().explain(context.rewrittenQuery(), topLevelDocId); for (RescoreContext ctx : context.rescore()) { Rescorer rescorer = ctx.rescorer(); explanation = rescorer.explain(topLevelDocId, context.searcher(), ctx, explanation); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index f0a7532dc80ac..13a9d28dadf4e 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -10,12 +10,14 @@ import org.elasticsearch.Version; import org.elasticsearch.index.mapper.TimeSeriesParams; +import org.elasticsearch.xcontent.InstantiatingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParserConstructor; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -159,6 +161,59 @@ public FieldCapabilities(String name, String type, } + /** + * Constructor for a set of indices, used by the parser. + * @param name The name of the field + * @param type The type associated with the field. + * @param isMetadataField Whether this field is a metadata field. + * @param isSearchable Whether this field is indexed for search. + * @param isAggregatable Whether this field can be aggregated on. + * @param isDimension Whether this field can be used as a dimension + * @param metricType If this field is a metric field, the metric's type; null for non-metric fields + * @param indices The list of indices where this field name is defined as {@code type}, + * or null if all indices have the same {@code type} for the field. + * @param nonSearchableIndices The list of indices where this field is not searchable, + * or null if the field is searchable in all indices. + * @param nonAggregatableIndices The list of indices where this field is not aggregatable, + * or null if the field is aggregatable in all indices. + * @param nonDimensionIndices The list of indices where this field is not a dimension + * @param metricConflictsIndices The list of indices where this field has different metric types or is not marked as a metric + * @param meta Merged metadata across indices.
+ */ + @SuppressWarnings("unused") + @ParserConstructor + public FieldCapabilities( + String name, + String type, + Boolean isMetadataField, + boolean isSearchable, + boolean isAggregatable, + Boolean isDimension, + String metricType, + List indices, + List nonSearchableIndices, + List nonAggregatableIndices, + List nonDimensionIndices, + List metricConflictsIndices, + Map> meta + ) { + this( + name, + type, + isMetadataField == null ? false : isMetadataField, + isSearchable, + isAggregatable, + isDimension == null ? false : isDimension, + metricType != null ? Enum.valueOf(TimeSeriesParams.MetricType.class, metricType) : null, + indices != null ? indices.toArray(new String[0]) : null, + nonSearchableIndices != null ? nonSearchableIndices.toArray(new String[0]) : null, + nonAggregatableIndices != null ? nonAggregatableIndices.toArray(new String[0]) : null, + nonDimensionIndices != null ? nonDimensionIndices.toArray(new String[0]) : null, + metricConflictsIndices != null ? metricConflictsIndices.toArray(new String[0]) : null, + meta != null ? meta : Collections.emptyMap() + ); + } + FieldCapabilities(StreamInput in) throws IOException { this.name = in.readString(); this.type = in.readString(); @@ -254,43 +309,31 @@ public static FieldCapabilities fromXContent(String name, XContentParser parser) } @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "field_capabilities", - true, - (a, name) -> new FieldCapabilities( - name, - (String) a[0], - a[3] == null ? false : (boolean) a[3], - (boolean) a[1], - (boolean) a[2], - a[4] == null ? false : (boolean) a[4], - a[5] != null ? Enum.valueOf(TimeSeriesParams.MetricType.class, (String) a[5]) : null, - a[6] != null ? ((List) a[6]).toArray(new String[0]) : null, - a[7] != null ? ((List) a[7]).toArray(new String[0]) : null, - a[8] != null ? ((List) a[8]).toArray(new String[0]) : null, - a[9] != null ? ((List) a[9]).toArray(new String[0]) : null, - a[10] != null ? ((List) a[10]).toArray(new String[0]) : null, - a[11] != null ? 
((Map>) a[11]) : Collections.emptyMap() - ) - ); + private static final InstantiatingObjectParser PARSER; static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD); // 0 - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD); // 1 - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD); // 2 - PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), IS_METADATA_FIELD); // 3 - PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_DIMENSION_FIELD); // 4 - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_METRIC_FIELD); // 5 - PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD); // 6 - PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD); // 7 - PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD); // 8 - PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_DIMENSION_INDICES_FIELD); // 9 - PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), METRIC_CONFLICTS_INDICES_FIELD); // 10 - PARSER.declareObject( + InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( + "field_capabilities", + true, + FieldCapabilities.class + ); + parser.declareString(ConstructingObjectParser.constructorArg(), TYPE_FIELD); + parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), IS_METADATA_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), SEARCHABLE_FIELD); + parser.declareBoolean(ConstructingObjectParser.constructorArg(), AGGREGATABLE_FIELD); + parser.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_DIMENSION_FIELD); + parser.declareString(ConstructingObjectParser.optionalConstructorArg(), TIME_SERIES_METRIC_FIELD); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), INDICES_FIELD); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_SEARCHABLE_INDICES_FIELD); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_AGGREGATABLE_INDICES_FIELD); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), NON_DIMENSION_INDICES_FIELD); + parser.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), METRIC_CONFLICTS_INDICES_FIELD); + parser.declareObject( ConstructingObjectParser.optionalConstructorArg(), - (parser, context) -> parser.map(HashMap::new, p -> Set.copyOf(p.list())), + (p, context) -> p.map(HashMap::new, v -> Set.copyOf(v.list())), META_FIELD - ); // 11 + ); + PARSER = parser.build(); } /** diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index b5cf692723dd7..cfdb932199b88 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -71,7 +71,7 @@ public TransportFieldCapabilitiesAction(TransportService transportService, this.fieldCapabilitiesFetcher = new FieldCapabilitiesFetcher(indicesService); final Set metadataFields = indicesService.getAllMetadataFields(); this.metadataFieldPred = metadataFields::contains; - 
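
The hunk above replaces an index-based ConstructingObjectParser lambda (note the bug-prone // 0 through // 11 position comments) with an InstantiatingObjectParser, which binds the declared fields, in declaration order, to the constructor tagged with @ParserConstructor. A minimal sketch of the pattern on a hypothetical class:

    import org.elasticsearch.xcontent.ConstructingObjectParser;
    import org.elasticsearch.xcontent.InstantiatingObjectParser;
    import org.elasticsearch.xcontent.ParseField;
    import org.elasticsearch.xcontent.ParserConstructor;

    public class Point {
        final int x;
        final int y;

        @ParserConstructor
        public Point(int x, int y) {
            this.x = x;
            this.y = y;
        }

        static final InstantiatingObjectParser<Point, Void> PARSER;
        static {
            InstantiatingObjectParser.Builder<Point, Void> builder =
                InstantiatingObjectParser.builder("point", true, Point.class);
            // Declaration order must match constructor-argument order.
            builder.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("x"));
            builder.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("y"));
            PARSER = builder.build();
        }
    }

This trades the manual a[0]..a[11] bookkeeping for a compile-checked constructor signature, which is why the null-defaulting logic moved into the @ParserConstructor overload above.
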
transportService.registerRequestHandler(ACTION_NODE_NAME, ThreadPool.Names.MANAGEMENT, + transportService.registerRequestHandler(ACTION_NODE_NAME, ThreadPool.Names.SEARCH_COORDINATION, FieldCapabilitiesNodeRequest::new, new NodeTransportHandler()); } @@ -111,7 +111,7 @@ protected void doExecute(Task task, FieldCapabilitiesRequest request, final Acti localIndices, nowInMillis, concreteIndices, - threadPool.executor(ThreadPool.Names.MANAGEMENT), + threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION), indexResponse -> indexResponses.putIfAbsent(indexResponse.getIndexName(), indexResponse), indexFailures::collect, countDown @@ -163,7 +163,7 @@ private Runnable createResponseMerger(FieldCapabilitiesRequest request, if (request.isMergeResults()) { // fork off to the management pool for merging the responses as the operation can run for longer than is acceptable // on a transport thread in case of large numbers of indices and/or fields - threadPool.executor(ThreadPool.Names.MANAGEMENT).submit( + threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION).submit( ActionRunnable.supply( listener, () -> merge(indexResponses, request.includeUnmapped(), new ArrayList<>(failures))) diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java index 918bbf426b2b8..96fd61c9f7010 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineRequest.java @@ -48,7 +48,7 @@ public PutPipelineRequest(StreamInput in) throws IOException { id = in.readString(); source = in.readBytesReference(); xContentType = in.readEnum(XContentType.class); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_16_0)) { version = in.readOptionalInt(); } else { version = null; @@ -86,7 +86,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(source); XContentHelper.writeTo(out, xContentType); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_16_0)) { out.writeOptionalInt(version); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 086441e280218..0e49a347e3da0 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -23,12 +23,10 @@ import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchContextMissingException; import org.elasticsearch.search.SearchPhaseResult; @@ -65,7 +63,6 @@ */ abstract class AbstractSearchAsyncAction extends SearchPhase implements SearchPhaseContext { private static final float DEFAULT_INDEX_BOOST = 1.0f; - 
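
The PutPipelineRequest change above is the standard wire-compatibility move when a feature is backported: the stream-version gate drops from 8.0.0 to 7.16.0 so that 7.16 nodes exchange the field too. The idiom, sketched on a hypothetical request class with one such optional field:

    import org.elasticsearch.Version;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    final class VersionGatedField {
        private final Integer version; // nullable; added on the wire in 7.16

        VersionGatedField(StreamInput in) throws IOException {
            if (in.getVersion().onOrAfter(Version.V_7_16_0)) {
                version = in.readOptionalInt();
            } else {
                version = null; // talking to a pre-7.16 node
            }
        }

        void writeTo(StreamOutput out) throws IOException {
            if (out.getVersion().onOrAfter(Version.V_7_16_0)) {
                out.writeOptionalInt(version);
            }
        }
    }

Reads and writes must be gated on the same version so both ends agree on the stream layout; gating only one side corrupts every field that follows.
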
private static final long[] EMPTY_LONG_ARRAY = new long[0]; private final Logger logger; private final SearchTransportService searchTransportService; private final Executor executor; @@ -736,21 +733,9 @@ public final ShardSearchRequest buildShardSearchRequest(SearchShardIterator shar AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID()); assert filter != null; float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST); - final Map indexToWaitForCheckpoints = request.getWaitForCheckpoints(); - final TimeValue waitForCheckpointsTimeout = request.getWaitForCheckpointsTimeout(); - final long[] waitForCheckpoints = indexToWaitForCheckpoints.getOrDefault(shardIt.shardId().getIndex().getName(), EMPTY_LONG_ARRAY); - - long waitForCheckpoint; - if (waitForCheckpoints.length == 0) { - waitForCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } else { - assert waitForCheckpoints.length > shardIndex; - waitForCheckpoint = waitForCheckpoints[shardIndex]; - } ShardSearchRequest shardRequest = new ShardSearchRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), shardIndex, getNumShards(), filter, indexBoost, timeProvider.getAbsoluteStartMillis(), - shardIt.getClusterAlias(), shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive(), waitForCheckpoint, - waitForCheckpointsTimeout); + shardIt.getClusterAlias(), shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive()); // if we already received a search result we can inform the shard that it // can return a null response if the request rewrites to match none rather // than creating an empty response in the search thread pool. diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java new file mode 100644 index 0000000000000..01777f671a1ab --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -0,0 +1,233 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.Version; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.Scroll; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchContextId; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Node-level request used during can-match phase + */ +public class CanMatchNodeRequest extends TransportRequest implements IndicesRequest { + + private final SearchSourceBuilder source; + private final List shards; + private final SearchType searchType; + private final Boolean requestCache; + private final boolean allowPartialSearchResults; + private final Scroll scroll; + private final int numberOfShards; + private final long nowInMillis; + @Nullable + private final String clusterAlias; + private final String[] indices; + private final IndicesOptions indicesOptions; + private final TimeValue waitForCheckpointsTimeout; + + public static class Shard implements Writeable { + private final String[] indices; + private final ShardId shardId; + private final int shardRequestIndex; + private final AliasFilter aliasFilter; + private final float indexBoost; + private final ShardSearchContextId readerId; + private final TimeValue keepAlive; + private final long waitForCheckpoint; + + public Shard(String[] indices, + ShardId shardId, + int shardRequestIndex, + AliasFilter aliasFilter, + float indexBoost, + ShardSearchContextId readerId, + TimeValue keepAlive, + long waitForCheckpoint) { + this.indices = indices; + this.shardId = shardId; + this.shardRequestIndex = shardRequestIndex; + this.aliasFilter = aliasFilter; + this.indexBoost = indexBoost; + this.readerId = readerId; + this.keepAlive = keepAlive; + this.waitForCheckpoint = waitForCheckpoint; + assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + } + + public Shard(StreamInput in) throws IOException { + indices = in.readStringArray(); + shardId = new ShardId(in); + shardRequestIndex = in.readVInt(); + aliasFilter = new AliasFilter(in); + indexBoost = in.readFloat(); + readerId = in.readOptionalWriteable(ShardSearchContextId::new); + keepAlive = in.readOptionalTimeValue(); + waitForCheckpoint = in.readLong(); + assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeStringArray(indices); + shardId.writeTo(out); + out.writeVInt(shardRequestIndex); + aliasFilter.writeTo(out); + out.writeFloat(indexBoost); + out.writeOptionalWriteable(readerId); + out.writeOptionalTimeValue(keepAlive); + 
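
CanMatchNodeRequest carries many per-shard Shard entries in a single node-level message: the coordinating node groups shard iterators by target node, so the can-match phase costs one transport round-trip per node instead of per shard. A simplified sketch of that grouping (types hypothetical):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    final class CanMatchBatching {
        record ShardEntry(String nodeId, int shardRequestIndex) {}

        static Map<String, List<ShardEntry>> groupByNode(List<ShardEntry> shards) {
            Map<String, List<ShardEntry>> perNode = new HashMap<>();
            for (ShardEntry shard : shards) {
                perNode.computeIfAbsent(shard.nodeId(), node -> new ArrayList<>()).add(shard);
            }
            return perNode; // one node-level request is built per map entry
        }
    }

On the receiving node, createShardSearchRequest then expands each Shard entry back into a full ShardSearchRequest, as shown below.
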
out.writeLong(waitForCheckpoint); + } + + public int getShardRequestIndex() { + return shardRequestIndex; + } + + public String[] getOriginalIndices() { + return indices; + } + + public ShardId shardId() { + return shardId; + } + } + + public CanMatchNodeRequest( + SearchRequest searchRequest, + IndicesOptions indicesOptions, + List shards, + int numberOfShards, + long nowInMillis, + @Nullable String clusterAlias + ) { + this.source = searchRequest.source(); + this.indicesOptions = indicesOptions; + this.shards = new ArrayList<>(shards); + this.searchType = searchRequest.searchType(); + this.requestCache = searchRequest.requestCache(); + // If allowPartialSearchResults is unset (ie null), the cluster-level default should have been substituted + // at this stage. Any NPEs in the above are therefore an error in request preparation logic. + assert searchRequest.allowPartialSearchResults() != null; + this.allowPartialSearchResults = searchRequest.allowPartialSearchResults(); + this.scroll = searchRequest.scroll(); + this.numberOfShards = numberOfShards; + this.nowInMillis = nowInMillis; + this.clusterAlias = clusterAlias; + this.waitForCheckpointsTimeout = searchRequest.getWaitForCheckpointsTimeout(); + indices = shards.stream().map(Shard::getOriginalIndices).flatMap(Arrays::stream).distinct() + .toArray(String[]::new); + } + + public CanMatchNodeRequest(StreamInput in) throws IOException { + super(in); + source = in.readOptionalWriteable(SearchSourceBuilder::new); + indicesOptions = IndicesOptions.readIndicesOptions(in); + searchType = SearchType.fromId(in.readByte()); + if (in.getVersion().before(Version.V_8_0_0)) { + // types no longer relevant so ignore + String[] types = in.readStringArray(); + if (types.length > 0) { + throw new IllegalStateException( + "types are no longer supported in search requests but found [" + Arrays.toString(types) + "]"); + } + } + scroll = in.readOptionalWriteable(Scroll::new); + requestCache = in.readOptionalBoolean(); + allowPartialSearchResults = in.readBoolean(); + numberOfShards = in.readVInt(); + nowInMillis = in.readVLong(); + clusterAlias = in.readOptionalString(); + waitForCheckpointsTimeout = in.readTimeValue(); + shards = in.readList(Shard::new); + indices = shards.stream().map(Shard::getOriginalIndices).flatMap(Arrays::stream).distinct() + .toArray(String[]::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeOptionalWriteable(source); + indicesOptions.writeIndicesOptions(out); + out.writeByte(searchType.id()); + if (out.getVersion().before(Version.V_8_0_0)) { + // types not supported so send an empty array to previous versions + out.writeStringArray(Strings.EMPTY_ARRAY); + } + out.writeOptionalWriteable(scroll); + out.writeOptionalBoolean(requestCache); + out.writeBoolean(allowPartialSearchResults); + out.writeVInt(numberOfShards); + out.writeVLong(nowInMillis); + out.writeOptionalString(clusterAlias); + out.writeTimeValue(waitForCheckpointsTimeout); + out.writeList(shards); + } + + public List getShardLevelRequests() { + return shards; + } + + public List createShardSearchRequests() { + return shards.stream().map(this::createShardSearchRequest).collect(Collectors.toList()); + } + + public ShardSearchRequest createShardSearchRequest(Shard r) { + ShardSearchRequest shardSearchRequest = new ShardSearchRequest( + new OriginalIndices(r.indices, indicesOptions), r.shardId, r.shardRequestIndex, numberOfShards, searchType, + source, requestCache, r.aliasFilter, r.indexBoost, 
allowPartialSearchResults, scroll, + nowInMillis, clusterAlias, r.readerId, r.keepAlive, r.waitForCheckpoint, waitForCheckpointsTimeout + ); + shardSearchRequest.setParentTask(getParentTask()); + return shardSearchRequest; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new SearchShardTask(id, type, action, getDescription(), parentTaskId, headers); + } + + @Override + public String getDescription() { + // Shard id is enough here, the request itself can be found by looking at the parent task description + return "shardIds[" + shards.stream().map(slr -> slr.shardId).collect(Collectors.toList()) + "]"; + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java new file mode 100644 index 0000000000000..05aaaa56583ed --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.search.CanMatchShardResponse; +import org.elasticsearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +public class CanMatchNodeResponse extends TransportResponse { + + private final List responses; + + public CanMatchNodeResponse(StreamInput in) throws IOException { + super(in); + responses = in.readList(ResponseOrFailure::new); + } + + public CanMatchNodeResponse(List responses) { + this.responses = responses; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(responses); + } + + public List getResponses() { + return responses; + } + + public static class ResponseOrFailure implements Writeable { + + public ResponseOrFailure(CanMatchShardResponse response) { + this.response = response; + this.exception = null; + } + + public ResponseOrFailure(Exception exception) { + this.exception = exception; + this.response = null; + } + + @Nullable + public CanMatchShardResponse getResponse() { + return response; + } + + @Nullable + public Exception getException() { + return exception; + } + + private final CanMatchShardResponse response; + private final Exception exception; + + public ResponseOrFailure(StreamInput in) throws IOException { + if (in.readBoolean()) { + response = new CanMatchShardResponse(in); + exception = null; + } else { + exception = in.readException(); + response = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + final boolean hasResponse = response != null; + out.writeBoolean(hasResponse); + if (hasResponse) { + response.writeTo(out); + } else { + out.writeException(exception); + } + } + } +} diff --git 
a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 97726626abe55..2f26026b5d70d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -5,31 +5,45 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ + package org.elasticsearch.action.search; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.core.Releasable; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.CoordinatorRewriteContext; import org.elasticsearch.index.query.CoordinatorRewriteContextProvider; +import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.SearchService; -import org.elasticsearch.search.SearchService.CanMatchResponse; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.MinAndMax; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; @@ -49,152 +63,409 @@ * sort them according to the provided order. This can be useful for instance to ensure that shards that contain recent * data are executed first when sorting by descending timestamp. 
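The description above covers the two jobs of this phase; what follows is a minimal standalone sketch of both, where ShardRange is an invented placeholder type (the real phase works on SearchShardIterator and MinAndMax): shards whose [min, max] range cannot intersect the query's range are dropped outright, and the survivors are ordered so that a descending primary sort queries the shards holding the newest data first.

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

class PreFilterSketch {
    record ShardRange(int shardIndex, long min, long max) {}

    // Drop shards that cannot match [queryMin, queryMax], then put the shard with the
    // largest max first, matching a descending sort on the range field.
    static List<ShardRange> filterAndSortDesc(List<ShardRange> shards, long queryMin, long queryMax) {
        return shards.stream()
            .filter(s -> s.max() >= queryMin && s.min() <= queryMax)
            .sorted(Comparator.comparingLong(ShardRange::max).reversed())
            .collect(Collectors.toList());
    }
}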
*/ -final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction { +final class CanMatchPreFilterSearchPhase extends SearchPhase { - private final Function, SearchPhase> phaseFactory; + private final Logger logger; + private final SearchRequest request; private final GroupShardsIterator shardsIts; + private final ActionListener listener; + private final SearchResponse.Clusters clusters; + private final TransportSearchAction.SearchTimeProvider timeProvider; + private final BiFunction nodeIdToConnection; + private final SearchTransportService searchTransportService; + private final Map shardItIndexMap; + private final Map concreteIndexBoosts; + private final Map aliasFilter; + private final SearchTask task; + private final Function, SearchPhase> phaseFactory; + private final Executor executor; + + private final CanMatchSearchPhaseResults results; private final CoordinatorRewriteContextProvider coordinatorRewriteContextProvider; + CanMatchPreFilterSearchPhase(Logger logger, SearchTransportService searchTransportService, - BiFunction nodeIdToConnection, - Map aliasFilter, Map concreteIndexBoosts, - Executor executor, SearchRequest request, - ActionListener listener, GroupShardsIterator shardsIts, - TransportSearchAction.SearchTimeProvider timeProvider, ClusterState clusterState, - SearchTask task, Function, SearchPhase> phaseFactory, - SearchResponse.Clusters clusters, CoordinatorRewriteContextProvider coordinatorRewriteContextProvider) { - //We set max concurrent shard requests to the number of shards so no throttling happens for can_match requests - super("can_match", logger, searchTransportService, nodeIdToConnection, aliasFilter, concreteIndexBoosts, - executor, request, listener, shardsIts, timeProvider, clusterState, task, - new CanMatchSearchPhaseResults(shardsIts.size()), shardsIts.size(), clusters); - this.phaseFactory = phaseFactory; + BiFunction nodeIdToConnection, + Map aliasFilter, Map concreteIndexBoosts, + Executor executor, SearchRequest request, + ActionListener listener, GroupShardsIterator shardsIts, + TransportSearchAction.SearchTimeProvider timeProvider, + SearchTask task, Function, SearchPhase> phaseFactory, + SearchResponse.Clusters clusters, + CoordinatorRewriteContextProvider coordinatorRewriteContextProvider) { + super("can_match"); + this.logger = logger; + this.searchTransportService = searchTransportService; + this.nodeIdToConnection = nodeIdToConnection; + this.request = request; + this.listener = listener; this.shardsIts = shardsIts; + this.clusters = clusters; + this.timeProvider = timeProvider; + this.concreteIndexBoosts = concreteIndexBoosts; + this.aliasFilter = aliasFilter; + this.task = task; + this.phaseFactory = phaseFactory; this.coordinatorRewriteContextProvider = coordinatorRewriteContextProvider; + this.executor = executor; + this.shardItIndexMap = new HashMap<>(); + results = new CanMatchSearchPhaseResults(shardsIts.size()); + + // we compute the shard index based on the natural order of the shards + // that participate in the search request. This means that this number is + // consistent between two requests that target the same shards. 
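As a hedged illustration of the comment above (plain strings stand in for SearchShardIterator; none of these names come from the patch): numbering shards only after sorting them into their natural order yields the same index for the same shard on every request that targets the same shard set, regardless of arrival order.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class StableShardIndices {
    static Map<String, Integer> assign(Collection<String> shardIds) {
        List<String> naturalOrder = new ArrayList<>(shardIds);
        Collections.sort(naturalOrder);           // deterministic natural order
        Map<String, Integer> indexMap = new HashMap<>();
        for (int i = 0; i < naturalOrder.size(); i++) {
            indexMap.put(naturalOrder.get(i), i); // same shard -> same index, every time
        }
        return indexMap;
    }
}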
+ List naturalOrder = new ArrayList<>(); + shardsIts.iterator().forEachRemaining(naturalOrder::add); + CollectionUtil.timSort(naturalOrder); + for (int i = 0; i < naturalOrder.size(); i++) { + shardItIndexMap.put(naturalOrder.get(i), i); + } } - @Override - public void addReleasable(Releasable releasable) { - throw new RuntimeException("cannot add releasable in " + getName() + " phase"); + private static boolean assertSearchCoordinationThread() { + assert Thread.currentThread().getName().contains(ThreadPool.Names.SEARCH_COORDINATION) : + "not called from the right thread " + Thread.currentThread().getName(); + return true; } @Override - protected void executePhaseOnShard(SearchShardIterator shardIt, SearchShardTarget shard, - SearchActionListener listener) { - getSearchTransport().sendCanMatch(getConnection(shard.getClusterAlias(), shard.getNodeId()), - buildShardSearchRequest(shardIt, listener.requestIndex), getTask(), listener); + public void run() throws IOException { + assert assertSearchCoordinationThread(); + checkNoMissingShards(); + Version version = request.minCompatibleShardNode(); + if (version != null && Version.CURRENT.minimumCompatibilityVersion().equals(version) == false) { + if (checkMinimumVersion(shardsIts) == false) { + throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", + request.minCompatibleShardNode()); + } + } + + runCoordinatorRewritePhase(); } - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + // tries to pre-filter shards based on information that's available to the coordinator + // without having to reach out to the actual shards + private void runCoordinatorRewritePhase() { + assert assertSearchCoordinationThread(); + final List matchedShardLevelRequests = new ArrayList<>(); + for (SearchShardIterator searchShardIterator : shardsIts) { + final CanMatchNodeRequest canMatchNodeRequest = + new CanMatchNodeRequest(request, searchShardIterator.getOriginalIndices().indicesOptions(), + Collections.emptyList(), getNumShards(), timeProvider.getAbsoluteStartMillis(), searchShardIterator.getClusterAlias()); + final ShardSearchRequest request = canMatchNodeRequest.createShardSearchRequest(buildShardLevelRequest(searchShardIterator)); + boolean canMatch = true; + CoordinatorRewriteContext coordinatorRewriteContext = + coordinatorRewriteContextProvider.getCoordinatorRewriteContext(request.shardId().getIndex()); + if (coordinatorRewriteContext != null) { + try { + canMatch = SearchService.queryStillMatchesAfterRewrite(request, coordinatorRewriteContext); + } catch (Exception e) { + // treat as if shard is still a potential match + } + } + if (canMatch) { + matchedShardLevelRequests.add(searchShardIterator); + } else { + CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); + result.setShardIndex(request.shardRequestIndex()); + results.consumeResult(result, () -> { + }); + } + } - return phaseFactory.apply(getIterator((CanMatchSearchPhaseResults) results, shardsIts)); + if (matchedShardLevelRequests.isEmpty() == false) { + new Round(new GroupShardsIterator<>(matchedShardLevelRequests)).run(); + } else { + finishPhase(); + } } - private GroupShardsIterator getIterator(CanMatchSearchPhaseResults results, - GroupShardsIterator shardsIts) { - int cardinality = results.getNumPossibleMatches(); - FixedBitSet possibleMatches = results.getPossibleMatches(); - if (cardinality == 0) { - // this is a special case where we have no hit but we need to get 
at least one search response in order - // to produce a valid search result with all the aggs etc. - // Since it's possible that some of the shards that we're skipping are - // unavailable, we would try to query the node that at least has some - // shards available in order to produce a valid search result. - int shardIndexToQuery = 0; - for (int i = 0; i < shardsIts.size(); i++) { - if (shardsIts.get(i).size() > 0) { - shardIndexToQuery = i; - break; + private void checkNoMissingShards() { + assert assertSearchCoordinationThread(); + assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; + if (request.allowPartialSearchResults() == false) { + final StringBuilder missingShards = new StringBuilder(); + // Fail-fast verification of all shards being available + for (int index = 0; index < shardsIts.size(); index++) { + final SearchShardIterator shardRoutings = shardsIts.get(index); + if (shardRoutings.size() == 0) { + if (missingShards.length() > 0) { + missingShards.append(", "); + } + missingShards.append(shardRoutings.shardId()); } } - possibleMatches.set(shardIndexToQuery); + if (missingShards.length() > 0) { + //Status red - shard is missing all copies and would produce partial results for an index search + final String msg = "Search rejected due to missing shards ["+ missingShards + + "]. Consider using `allow_partial_search_results` setting to bypass this error."; + throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); + } } - SearchSourceBuilder source = getRequest().source(); - int i = 0; - for (SearchShardIterator iter : shardsIts) { - if (possibleMatches.get(i++)) { - iter.reset(); + } + + private Map> groupByNode(GroupShardsIterator shards) { + Map> requests = new HashMap<>(); + for (int i = 0; i < shards.size(); i++) { + final SearchShardIterator shardRoutings = shards.get(i); + assert shardRoutings.skip() == false; + assert shardItIndexMap.containsKey(shardRoutings); + SearchShardTarget target = shardRoutings.nextOrNull(); + if (target != null) { + requests.computeIfAbsent(new SendingTarget(target.getClusterAlias(), target.getNodeId()), + t -> new ArrayList<>()).add(shardRoutings); } else { - iter.resetAndSkip(); + requests.computeIfAbsent(new SendingTarget(null, null), + t -> new ArrayList<>()).add(shardRoutings); } } - if (shouldSortShards(results.minAndMaxes) == false) { - return shardsIts; - } - FieldSortBuilder fieldSort = FieldSortBuilder.getPrimaryFieldSortOrNull(source); - return new GroupShardsIterator<>(sortShards(shardsIts, results.minAndMaxes, fieldSort.order())); + return requests; } - @Override - protected void performPhaseOnShard(int shardIndex, SearchShardIterator shardIt, SearchShardTarget shard) { - CoordinatorRewriteContext coordinatorRewriteContext = - coordinatorRewriteContextProvider.getCoordinatorRewriteContext(shardIt.shardId().getIndex()); + /** + * Sending can-match requests is round-based and grouped per target node. + * If there are failures during a round, there will be a follow-up round + * to retry on other available shard copies. 
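A sketch of that round-based retry loop under simplified assumptions (string node ids, integer shard indices, a synchronous sender; all names are invented for illustration, this is not the transport code below): each round groups the still-unresolved shards by the node holding their next copy, sends one batch per node, and carries failed batches into the next round until every shard has either succeeded or run out of copies.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class CanMatchRoundsSketch {
    interface NodeBatchSender {                      // stand-in for the node-level transport call
        boolean sendBatch(String nodeId, List<Integer> shardIndices);
    }

    static void run(Map<Integer, List<String>> copiesPerShard, NodeBatchSender sender) {
        Map<Integer, Integer> nextCopy = new HashMap<>();
        List<Integer> remaining = new ArrayList<>(copiesPerShard.keySet());
        while (remaining.isEmpty() == false) {
            // group the remaining shards by the node that holds their next untried copy
            Map<String, List<Integer>> byNode = new HashMap<>();
            List<Integer> failed = new ArrayList<>();
            for (int shard : remaining) {
                List<String> copies = copiesPerShard.get(shard);
                int copy = nextCopy.merge(shard, 1, Integer::sum) - 1;
                if (copy >= copies.size()) {
                    continue;                        // no copies left: give up on this shard
                }
                byNode.computeIfAbsent(copies.get(copy), n -> new ArrayList<>()).add(shard);
            }
            for (Map.Entry<String, List<Integer>> e : byNode.entrySet()) {
                if (sender.sendBatch(e.getKey(), e.getValue()) == false) {
                    failed.addAll(e.getValue());     // retry these in the next round
                }
            }
            remaining = failed;
        }
    }
}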
+ */ + class Round extends AbstractRunnable { + private final GroupShardsIterator shards; + private final CountDown countDown; + private final AtomicReferenceArray failedResponses; - if (coordinatorRewriteContext == null) { - super.performPhaseOnShard(shardIndex, shardIt, shard); - return; + Round(GroupShardsIterator shards) { + this.shards = shards; + this.countDown = new CountDown(shards.size()); + this.failedResponses = new AtomicReferenceArray<>(shardsIts.size()); } - try { - ShardSearchRequest request = buildShardSearchRequest(shardIt, shardIndex); - boolean canMatch = SearchService.queryStillMatchesAfterRewrite(request, coordinatorRewriteContext); + @Override + protected void doRun() { + assert assertSearchCoordinationThread(); + final Map> requests = groupByNode(shards); - // Trigger the query as there's still a chance that we can skip - // this shard given other query filters that we cannot apply - // in the coordinator - if (canMatch) { - super.performPhaseOnShard(shardIndex, shardIt, shard); - return; + for (Map.Entry> entry : requests.entrySet()) { + CanMatchNodeRequest canMatchNodeRequest = createCanMatchRequest(entry); + List shardLevelRequests = canMatchNodeRequest.getShardLevelRequests(); + + if (entry.getKey().nodeId == null) { + // no target node: just mark the requests as failed + for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { + onOperationFailed(shard.getShardRequestIndex(), null); + } + continue; + } + + try { + searchTransportService.sendCanMatch(getConnection(entry.getKey()), canMatchNodeRequest, + task, new ActionListener<>() { + @Override + public void onResponse(CanMatchNodeResponse canMatchNodeResponse) { + assert canMatchNodeResponse.getResponses().size() == canMatchNodeRequest.getShardLevelRequests().size(); + for (int i = 0; i < canMatchNodeResponse.getResponses().size(); i++) { + CanMatchNodeResponse.ResponseOrFailure response = canMatchNodeResponse.getResponses().get(i); + if (response.getResponse() != null) { + CanMatchShardResponse shardResponse = response.getResponse(); + shardResponse.setShardIndex(shardLevelRequests.get(i).getShardRequestIndex()); + onOperation(shardResponse.getShardIndex(), shardResponse); + } else { + Exception failure = response.getException(); + assert failure != null; + onOperationFailed(shardLevelRequests.get(i).getShardRequestIndex(), failure); + } + } + } + + @Override + public void onFailure(Exception e) { + for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { + onOperationFailed(shard.getShardRequestIndex(), e); + } + } + } + ); + } catch (Exception e) { + for (CanMatchNodeRequest.Shard shard : shardLevelRequests) { + onOperationFailed(shard.getShardRequestIndex(), e); + } + } + } + } + + private void onOperation(int idx, CanMatchShardResponse response) { + failedResponses.set(idx, null); + results.consumeResult(response, () -> { + if (countDown.countDown()) { + finishRound(); + } + }); + } + + private void onOperationFailed(int idx, Exception e) { + failedResponses.set(idx, e); + results.consumeShardFailure(idx); + if (countDown.countDown()) { + finishRound(); + } + } + + private void finishRound() { + List remainingShards = new ArrayList<>(); + for (SearchShardIterator ssi : shards) { + int shardIndex = shardItIndexMap.get(ssi); + Exception failedResponse = failedResponses.get(shardIndex); + if (failedResponse != null) { + remainingShards.add(ssi); + } + } + if (remainingShards.isEmpty()) { + finishPhase(); + } else { + // trigger another round, forcing execution + executor.execute(new Round(new 
GroupShardsIterator<>(remainingShards)) { + @Override + public boolean isForceExecution() { + return true; + } + }); } + } + + @Override + public void onFailure(Exception e) { + if (logger.isDebugEnabled()) { + logger.debug(new ParameterizedMessage("Failed to execute [{}] while running [{}] phase", request, getName()), e); + } + onPhaseFailure("round", e); + } + } - CanMatchResponse result = new CanMatchResponse(canMatch, null); - result.setSearchShardTarget(shard == null ? new SearchShardTarget(null, shardIt.shardId(), shardIt.getClusterAlias()) : shard); - result.setShardIndex(shardIndex); - fork(() -> onShardResult(result, shardIt)); + private static class SendingTarget { + @Nullable + private final String clusterAlias; + @Nullable + private final String nodeId; + + SendingTarget(@Nullable String clusterAlias, @Nullable String nodeId) { + this.clusterAlias = clusterAlias; + this.nodeId = nodeId; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SendingTarget that = (SendingTarget) o; + return Objects.equals(clusterAlias, that.clusterAlias) && + Objects.equals(nodeId, that.nodeId); + } + + @Override + public int hashCode() { + return Objects.hash(clusterAlias, nodeId); + } + } + + private CanMatchNodeRequest createCanMatchRequest(Map.Entry> entry) { + final SearchShardIterator first = entry.getValue().get(0); + final List shardLevelRequests = + entry.getValue().stream().map(this::buildShardLevelRequest).collect(Collectors.toCollection(ArrayList::new)); + assert entry.getValue().stream().allMatch(Objects::nonNull); + assert entry.getValue().stream().allMatch(ssi -> Objects.equals(ssi.getOriginalIndices().indicesOptions(), + first.getOriginalIndices().indicesOptions())); + assert entry.getValue().stream().allMatch(ssi -> Objects.equals(ssi.getClusterAlias(), first.getClusterAlias())); + return new CanMatchNodeRequest(request, first.getOriginalIndices().indicesOptions(), + shardLevelRequests, getNumShards(), timeProvider.getAbsoluteStartMillis(), first.getClusterAlias()); + } + + private void finishPhase() { + try { + phaseFactory.apply(getIterator(results, shardsIts)).start(); } catch (Exception e) { - // If we fail to rewrite it on the coordinator, just try to execute - // the query in the shard. 
- super.performPhaseOnShard(shardIndex, shardIt, shard); + if (logger.isDebugEnabled()) { + logger.debug(new ParameterizedMessage("Failed to execute [{}] while running [{}] phase", request, getName()), e); + } + onPhaseFailure("finish", e); } } - private static List sortShards(GroupShardsIterator shardsIts, - MinAndMax[] minAndMaxes, - SortOrder order) { - return IntStream.range(0, shardsIts.size()) - .boxed() - .sorted(shardComparator(shardsIts, minAndMaxes, order)) - .map(shardsIts::get) - .collect(Collectors.toList()); + private static final float DEFAULT_INDEX_BOOST = 1.0f; + + public CanMatchNodeRequest.Shard buildShardLevelRequest(SearchShardIterator shardIt) { + AliasFilter filter = aliasFilter.get(shardIt.shardId().getIndex().getUUID()); + assert filter != null; + float indexBoost = concreteIndexBoosts.getOrDefault(shardIt.shardId().getIndex().getUUID(), DEFAULT_INDEX_BOOST); + int shardRequestIndex = shardItIndexMap.get(shardIt); + return new CanMatchNodeRequest.Shard(shardIt.getOriginalIndices().indices(), shardIt.shardId(), + shardRequestIndex, filter, indexBoost, shardIt.getSearchContextId(), shardIt.getSearchContextKeepAlive(), + ShardSearchRequest.computeWaitForCheckpoint(request.getWaitForCheckpoints(), shardIt.shardId(), shardRequestIndex)); } - private static boolean shouldSortShards(MinAndMax[] minAndMaxes) { - Class clazz = null; - for (MinAndMax minAndMax : minAndMaxes) { - if (clazz == null) { - clazz = minAndMax == null ? null : minAndMax.getMin().getClass(); - } else if (minAndMax != null && clazz != minAndMax.getMin().getClass()) { - // we don't support sort values that mix different types (e.g.: long/double, numeric/keyword). - // TODO: we could fail the request because there is a high probability - // that the merging of topdocs will fail later for the same reason ? - return false; + private boolean checkMinimumVersion(GroupShardsIterator shardsIts) { + for (SearchShardIterator it : shardsIts) { + if (it.getTargetNodeIds().isEmpty() == false) { + boolean isCompatible = it.getTargetNodeIds().stream().anyMatch(nodeId -> { + Transport.Connection conn = getConnection(new SendingTarget(it.getClusterAlias(), nodeId)); + return conn == null || conn.getVersion().onOrAfter(request.minCompatibleShardNode()); + }); + if (isCompatible == false) { + return false; + } } } - return clazz != null; + return true; } - private static Comparator shardComparator(GroupShardsIterator shardsIts, - MinAndMax[] minAndMaxes, - SortOrder order) { - final Comparator comparator = Comparator.comparing( - index -> minAndMaxes[index], - forciblyCast(MinAndMax.getComparator(order)) - ); + @Override + public void start() { + if (getNumShards() == 0) { + //no search shards to search on, bail with empty response + //(it happens with search across _all with no indices around and consistent with broadcast operations) + int trackTotalHitsUpTo = request.source() == null ? SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : + request.source().trackTotalHitsUpTo() == null ? 
SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO : + request.source().trackTotalHitsUpTo(); + // total hits is null in the response if the tracking of total hits is disabled + boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; + listener.onResponse(new SearchResponse(InternalSearchResponse.empty(withTotalHits), null, 0, 0, + 0, timeProvider.buildTookInMillis(), ShardSearchFailure.EMPTY_ARRAY, clusters, null)); + return; + } + + // Note that the search is failed when this task is rejected by the executor + executor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + if (logger.isDebugEnabled()) { + logger.debug(new ParameterizedMessage("Failed to execute [{}] while running [{}] phase", request, getName()), e); + } + onPhaseFailure("start", e); + } + + @Override + protected void doRun() throws IOException { + CanMatchPreFilterSearchPhase.this.run(); + } + }); + } - return comparator.thenComparing(index -> shardsIts.get(index)); + + public void onPhaseFailure(String msg, Exception cause) { + listener.onFailure(new SearchPhaseExecutionException(getName(), msg, cause, ShardSearchFailure.EMPTY_ARRAY)); + } + + public Transport.Connection getConnection(SendingTarget sendingTarget) { + Transport.Connection conn = nodeIdToConnection.apply(sendingTarget.clusterAlias, sendingTarget.nodeId); + Version minVersion = request.minCompatibleShardNode(); + if (minVersion != null && conn != null && conn.getVersion().before(minVersion)) { + throw new VersionMismatchException("One of the shards is incompatible with the required minimum version [{}]", minVersion); + } + return conn; } - private static final class CanMatchSearchPhaseResults extends SearchPhaseResults { + private int getNumShards() { + return shardsIts.size(); + } + + private static final class CanMatchSearchPhaseResults extends SearchPhaseResults { private final FixedBitSet possibleMatches; private final MinAndMax[] minAndMaxes; private int numPossibleMatches; @@ -206,7 +477,7 @@ private static final class CanMatchSearchPhaseResults extends SearchPhaseResults } @Override - void consumeResult(CanMatchResponse result, Runnable next) { + void consumeResult(CanMatchShardResponse result, Runnable next) { try { consumeResult(result.getShardIndex(), result.canMatch(), result.estimatedMinAndMax()); } finally { @@ -242,8 +513,81 @@ synchronized FixedBitSet getPossibleMatches() { } @Override - Stream getSuccessfulResults() { + Stream getSuccessfulResults() { return Stream.empty(); } } + + private GroupShardsIterator getIterator(CanMatchSearchPhaseResults results, + GroupShardsIterator shardsIts) { + int cardinality = results.getNumPossibleMatches(); + FixedBitSet possibleMatches = results.getPossibleMatches(); + if (cardinality == 0) { + // this is a special case where we have no hit but we need to get at least one search response in order + // to produce a valid search result with all the aggs etc. + // Since it's possible that some of the shards that we're skipping are + // unavailable, we would try to query the node that at least has some + // shards available in order to produce a valid search result. 
+ int shardIndexToQuery = 0; + for (int i = 0; i < shardsIts.size(); i++) { + if (shardsIts.get(i).size() > 0) { + shardIndexToQuery = i; + break; + } + } + possibleMatches.set(shardIndexToQuery); + } + SearchSourceBuilder source = request.source(); + int i = 0; + for (SearchShardIterator iter : shardsIts) { + if (possibleMatches.get(i++)) { + iter.reset(); + } else { + iter.resetAndSkip(); + } + } + if (shouldSortShards(results.minAndMaxes) == false) { + return shardsIts; + } + FieldSortBuilder fieldSort = FieldSortBuilder.getPrimaryFieldSortOrNull(source); + return new GroupShardsIterator<>(sortShards(shardsIts, results.minAndMaxes, fieldSort.order())); + } + + private static List sortShards(GroupShardsIterator shardsIts, + MinAndMax[] minAndMaxes, + SortOrder order) { + return IntStream.range(0, shardsIts.size()) + .boxed() + .sorted(shardComparator(shardsIts, minAndMaxes, order)) + .map(shardsIts::get) + .collect(Collectors.toList()); + } + + private static boolean shouldSortShards(MinAndMax[] minAndMaxes) { + Class clazz = null; + for (MinAndMax minAndMax : minAndMaxes) { + if (clazz == null) { + clazz = minAndMax == null ? null : minAndMax.getMin().getClass(); + } else if (minAndMax != null && clazz != minAndMax.getMin().getClass()) { + // we don't support sort values that mix different types (e.g.: long/double, numeric/keyword). + // TODO: we could fail the request because there is a high probability + // that the merging of topdocs will fail later for the same reason ? + return false; + } + } + return clazz != null; + } + + private static Comparator shardComparator(GroupShardsIterator shardsIts, + MinAndMax[] minAndMaxes, + SortOrder order) { + final Comparator comparator = Comparator.comparing( + index -> minAndMaxes[index], + forciblyCast(MinAndMax.getComparator(order)) + ); + + return comparator.thenComparing(shardsIts::get); + } + } + diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 83f0001972e81..88da2fdfa3a9e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -10,6 +10,7 @@ import org.elasticsearch.core.CheckedRunnable; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Objects; /** @@ -28,4 +29,12 @@ protected SearchPhase(String name) { public String getName() { return name; } + + public void start() { + try { + run(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 142593edfe384..e3609d263ff47 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -21,7 +21,7 @@ import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; -import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; @@ -165,21 +165,21 @@ static SortedTopDocs sortDocs(boolean ignoreFrom, final Collection topD } boolean 
isSortedByField = false; SortField[] sortFields = null; - String collapseField = null; - Object[] collapseValues = null; + String groupField = null; + Object[] groupValues = null; if (mergedTopDocs instanceof TopFieldDocs) { TopFieldDocs fieldDocs = (TopFieldDocs) mergedTopDocs; sortFields = fieldDocs.fields; - if (fieldDocs instanceof CollapseTopFieldDocs) { + if (fieldDocs instanceof TopFieldGroups) { isSortedByField = (fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) == false; - CollapseTopFieldDocs collapseTopFieldDocs = (CollapseTopFieldDocs) fieldDocs; - collapseField = collapseTopFieldDocs.field; - collapseValues = collapseTopFieldDocs.collapseValues; + TopFieldGroups topFieldGroups = (TopFieldGroups) fieldDocs; + groupField = topFieldGroups.field; + groupValues = topFieldGroups.groupValues; } else { isSortedByField = true; } } - return new SortedTopDocs(scoreDocs, isSortedByField, sortFields, collapseField, collapseValues, numSuggestDocs); + return new SortedTopDocs(scoreDocs, isSortedByField, sortFields, groupField, groupValues, numSuggestDocs); } static TopDocs mergeTopDocs(Collection results, int topN, int from) { @@ -191,11 +191,11 @@ static TopDocs mergeTopDocs(Collection results, int topN, int from) { final int numShards = results.size(); if (numShards == 1 && from == 0) { // only one shard and no pagination we can just return the topDocs as we got them. return topDocs; - } else if (topDocs instanceof CollapseTopFieldDocs) { - CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) topDocs; + } else if (topDocs instanceof TopFieldGroups) { + TopFieldGroups firstTopDocs = (TopFieldGroups) topDocs; final Sort sort = new Sort(firstTopDocs.fields); - final CollapseTopFieldDocs[] shardTopDocs = results.toArray(new CollapseTopFieldDocs[numShards]); - mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs, false); + final TopFieldGroups[] shardTopDocs = results.toArray(new TopFieldGroups[numShards]); + mergedTopDocs = TopFieldGroups.merge(sort, from, topN, shardTopDocs, false); } else if (topDocs instanceof TopFieldDocs) { TopFieldDocs firstTopDocs = (TopFieldDocs) topDocs; final Sort sort = new Sort(firstTopDocs.fields); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 3de3c95324571..684397afb4d3b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -14,7 +14,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; -import org.apache.lucene.search.grouping.CollapseTopFieldDocs; +import org.elasticsearch.lucene.grouping.TopFieldGroups; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchPhaseController.TopDocsStats; import org.elasticsearch.action.search.SearchResponse.Clusters; @@ -253,7 +253,7 @@ private static TopDocs searchHitsToTopDocs(SearchHits searchHits, TotalHits tota if (searchHits.getSortFields() != null) { if (searchHits.getCollapseField() != null) { assert searchHits.getCollapseValues() != null; - topDocs = new CollapseTopFieldDocs(searchHits.getCollapseField(), totalHits, scoreDocs, + topDocs = new TopFieldGroups(searchHits.getCollapseField(), totalHits, scoreDocs, searchHits.getSortFields(), searchHits.getCollapseValues()); } else { 
topDocs = new TopFieldDocs(totalHits, scoreDocs, searchHits.getSortFields()); @@ -338,18 +338,18 @@ private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topD } } SortField[] sortFields = null; - String collapseField = null; - Object[] collapseValues = null; + String groupField = null; + Object[] groupValues = null; if (topDocs instanceof TopFieldDocs) { sortFields = ((TopFieldDocs)topDocs).fields; - if (topDocs instanceof CollapseTopFieldDocs) { - CollapseTopFieldDocs collapseTopFieldDocs = (CollapseTopFieldDocs)topDocs; - collapseField = collapseTopFieldDocs.field; - collapseValues = collapseTopFieldDocs.collapseValues; + if (topDocs instanceof TopFieldGroups) { + TopFieldGroups topFieldGroups = (TopFieldGroups)topDocs; + groupField = topFieldGroups.field; + groupValues = topFieldGroups.groupValues; } } return new SearchHits(searchHits, topDocsStats.getTotalHits(), topDocsStats.getMaxScore(), - sortFields, collapseField, collapseValues); + sortFields, groupField, groupValues); } private static final class FieldDocAndSearchHit extends FieldDoc { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 41860c52174d4..627474ab81074 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.search; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -19,12 +20,14 @@ import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.search.CanMatchShardResponse; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -51,9 +54,12 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.atomic.AtomicReferenceArray; import java.util.function.BiFunction; /** @@ -73,6 +79,7 @@ public class SearchTransportService { public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; + public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; private final NodeClient client; @@ -117,9 +124,57 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI } public void 
sendCanMatch(Transport.Connection connection, final ShardSearchRequest request, SearchTask task, final - ActionListener listener) { + ActionListener listener) { transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task, - TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchService.CanMatchResponse::new)); + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchShardResponse::new)); + } + + public void sendCanMatch(Transport.Connection connection, final CanMatchNodeRequest request, SearchTask task, final + ActionListener listener) { + if (connection.getVersion().onOrAfter(Version.V_7_16_0) && + connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { + transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NODE_NAME, request, task, + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchNodeResponse::new)); + } else { + // BWC layer: translate into shard-level requests + final List shardSearchRequests = request.createShardSearchRequests(); + final AtomicReferenceArray results = + new AtomicReferenceArray<>(shardSearchRequests.size()); + final CountDown counter = new CountDown(shardSearchRequests.size()); + final Runnable maybeFinish = () -> { + if (counter.countDown()) { + final CanMatchNodeResponse.ResponseOrFailure[] responses = + new CanMatchNodeResponse.ResponseOrFailure[shardSearchRequests.size()]; + for (int i = 0; i < responses.length; i++) { + responses[i] = results.get(i); + } + final CanMatchNodeResponse response = new CanMatchNodeResponse(Arrays.asList(responses)); + listener.onResponse(response); + } + }; + for (int i = 0; i < shardSearchRequests.size(); i++) { + final ShardSearchRequest shardSearchRequest = shardSearchRequests.get(i); + final int finalI = i; + try { + sendCanMatch(connection, shardSearchRequest, task, new ActionListener<>() { + @Override + public void onResponse(CanMatchShardResponse response) { + results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(response)); + maybeFinish.run(); + } + + @Override + public void onFailure(Exception e) { + results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); + maybeFinish.run(); + } + }); + } catch (Exception e) { + results.set(finalI, new CanMatchNodeResponse.ResponseOrFailure(e)); + maybeFinish.run(); + } + } + } } public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener listener) { @@ -363,7 +418,13 @@ public static void registerRequestHandler(TransportService transportService, Sea (request, channel, task) -> { searchService.canMatch(request, new ChannelActionListener<>(channel, QUERY_CAN_MATCH_NAME, request)); }); - TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, true, SearchService.CanMatchResponse::new); + TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, true, CanMatchShardResponse::new); + + transportService.registerRequestHandler(QUERY_CAN_MATCH_NODE_NAME, ThreadPool.Names.SEARCH_COORDINATION, CanMatchNodeRequest::new, + (request, channel, task) -> { + searchService.canMatch(request, new ChannelActionListener<>(channel, QUERY_CAN_MATCH_NAME, request)); + }); + TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 
2b474fc1cbd20..f45837635e043 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -106,6 +106,9 @@ public class TransportSearchAction extends HandledTransportAction SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( "action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope); + public static final Setting DEFAULT_PRE_FILTER_SHARD_SIZE = Setting.intSetting( + "action.search.pre_filter_shard_size.default", SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE, 1, Property.NodeScope); + private final ThreadPool threadPool; private final ClusterService clusterService; private final SearchTransportService searchTransportService; @@ -116,6 +119,7 @@ public class TransportSearchAction extends HandledTransportAction buildPerIndexOriginalIndices(ClusterState clusterState, @@ -742,7 +747,7 @@ private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, Sea nodes::get, remoteConnections, searchTransportService::getConnection); final Executor asyncSearchExecutor = asyncSearchExecutor(concreteLocalIndices); final boolean preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest, concreteLocalIndices, - localShardIterators.size() + remoteShardIterators.size()); + localShardIterators.size() + remoteShardIterators.size(), defaultPreFilterShardSize); searchAsyncActionProvider.asyncSearchAction( task, searchRequest, asyncSearchExecutor, shardIterators, timeProvider, connectionLookup, clusterState, Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener, @@ -789,14 +794,15 @@ static BiFunction buildConnectionLookup(St static boolean shouldPreFilterSearchShards(ClusterState clusterState, SearchRequest searchRequest, String[] indices, - int numShards) { + int numShards, + int defaultPreFilterShardSize) { SearchSourceBuilder source = searchRequest.source(); Integer preFilterShardSize = searchRequest.getPreFilterShardSize(); if (preFilterShardSize == null && (hasReadOnlyIndices(indices, clusterState) || hasPrimaryFieldSort(source))) { preFilterShardSize = 1; } else if (preFilterShardSize == null) { - preFilterShardSize = SearchRequest.DEFAULT_PRE_FILTER_SHARD_SIZE; + preFilterShardSize = defaultPreFilterShardSize; } return searchRequest.searchType() == QUERY_THEN_FETCH // we can't do this for DFS it needs to fan out to all shards all the time && (SearchService.canRewriteToMatchNone(source) || hasPrimaryFieldSort(source)) @@ -821,14 +827,14 @@ static GroupShardsIterator mergeShardsIterators(List asyncSearchAction( + SearchPhase asyncSearchAction( SearchTask task, SearchRequest searchRequest, Executor executor, GroupShardsIterator shardIterators, SearchTimeProvider timeProvider, BiFunction connectionLookup, ClusterState clusterState, Map aliasFilter, Map concreteIndexBoosts, ActionListener listener, boolean preFilter, ThreadPool threadPool, SearchResponse.Clusters clusters); } - private AbstractSearchAsyncAction searchAsyncAction( + private SearchPhase searchAsyncAction( SearchTask task, SearchRequest searchRequest, Executor executor, @@ -844,9 +850,9 @@ private AbstractSearchAsyncAction searchAsyncAction SearchResponse.Clusters clusters) { if (preFilter) { return new CanMatchPreFilterSearchPhase(logger, searchTransportService, connectionLookup, - aliasFilter, concreteIndexBoosts, executor, searchRequest, listener, shardIterators, - timeProvider, clusterState, task, (iter) -> { - AbstractSearchAsyncAction action = 
searchAsyncAction( + aliasFilter, concreteIndexBoosts, threadPool.executor(ThreadPool.Names.SEARCH_COORDINATION), searchRequest, listener, + shardIterators, timeProvider, task, (iter) -> { + SearchPhase action = searchAsyncAction( task, searchRequest, executor, @@ -860,12 +866,8 @@ private AbstractSearchAsyncAction searchAsyncAction false, threadPool, clusters); - return new SearchPhase(action.getName()) { - @Override - public void run() { - action.start(); - } - }; + assert action instanceof AbstractSearchAsyncAction; + return action; }, clusters, searchService.getCoordinatorRewriteContextProvider(timeProvider::getAbsoluteStartMillis)); } else { final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults(executor, diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 5b04750b57a3f..f6e553e62c948 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -25,9 +25,14 @@ public abstract class AcknowledgedRequest getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); registerMetadataCustom(entries, NodesShutdownMetadata.TYPE, NodesShutdownMetadata::new, NodesShutdownMetadata::readDiffFrom); + registerMetadataCustom( + entries, + FeatureMigrationResults.TYPE, + FeatureMigrationResults::new, + FeatureMigrationResults::readDiffFrom + ); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); @@ -266,7 +270,6 @@ protected void configure() { bind(MetadataIndexStateService.class).asEagerSingleton(); bind(MetadataMappingService.class).asEagerSingleton(); bind(MetadataIndexAliasesService.class).asEagerSingleton(); - bind(MetadataUpdateSettingsService.class).asEagerSingleton(); bind(MetadataIndexTemplateService.class).asEagerSingleton(); bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); bind(DelayedAllocationService.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java index e93b5cf77592e..f7d4e1dc82c48 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.cluster; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import java.util.IdentityHashMap; @@ -46,7 +47,12 @@ default void clusterStatePublished(ClusterStatePublicationEvent clusterStatePubl * This allows grouping task descriptions by the submitting source.
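The new implementation below delegates to Strings.collectionToDelimitedStringWithLimit so a huge task batch cannot blow up log lines; here is a hand-rolled equivalent of that capping behaviour, as a hedged sketch (BoundedDescription and its omitted-count suffix are invented for illustration, not the ES output format):

import java.util.List;

class BoundedDescription {
    static String describe(List<String> descriptions, int maxLength) {
        StringBuilder output = new StringBuilder();
        int omitted = 0;
        for (String description : descriptions) {
            if (description.isEmpty()) {
                continue;                              // skip tasks with no description
            }
            if (output.length() + description.length() > maxLength) {
                omitted++;                             // over budget: count instead of append
                continue;
            }
            if (output.length() > 0) {
                output.append(", ");
            }
            output.append(description);
        }
        if (omitted > 0) {
            output.append(" ... (").append(omitted).append(" more omitted)"); // invented suffix
        }
        return output.toString();
    }
}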
*/ default String describeTasks(List tasks) { - return String.join(", ", tasks.stream().map(t -> (CharSequence)t.toString()).filter(t -> t.length() > 0)::iterator); + final StringBuilder output = new StringBuilder(); + Strings.collectionToDelimitedStringWithLimit( + (Iterable) () -> tasks.stream().map(Object::toString).filter(s -> s.isEmpty() == false).iterator(), + ", ", "", "", 1024, output + ); + return output.toString(); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 06e345c1aa2ff..0b54eb9ef0af6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,14 +19,13 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AdjustableSemaphore; import org.elasticsearch.core.TimeValue; import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.mapper.Mapping; -import java.util.concurrent.Semaphore; - /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members). @@ -106,30 +105,4 @@ protected void sendUpdateMapping(Index index, Mapping mappingUpdate, ActionListe client.execute(AutoPutMappingAction.INSTANCE, putMappingRequest, ActionListener.wrap(r -> listener.onResponse(null), listener::onFailure)); } - - static class AdjustableSemaphore extends Semaphore { - - private final Object maxPermitsMutex = new Object(); - private int maxPermits; - - AdjustableSemaphore(int maxPermits, boolean fair) { - super(maxPermits, fair); - this.maxPermits = maxPermits; - } - - void setMaxPermits(int permits) { - synchronized (maxPermitsMutex) { - final int diff = Math.subtractExact(permits, maxPermits); - if (diff > 0) { - // add permits - release(diff); - } else if (diff < 0) { - // remove permits - reducePermits(Math.negateExact(diff)); - } - - maxPermits = permits; - } - } - } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index f4041e76218ca..21a965f3fc60f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -9,6 +9,7 @@ import joptsimple.OptionParser; import joptsimple.OptionSet; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.LockObtainFailedException; @@ -22,20 +23,22 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.ComponentTemplateMetadata; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; import org.elasticsearch.cluster.metadata.DataStreamMetadata; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.core.Tuple; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.gateway.PersistedClusterStateService; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.nio.file.Files; @@ -68,7 +71,8 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { public <T, C> T parseNamedObject(Class<T> categoryClass, String name, XContentParser parser, C context) throws IOException { // Currently, two unknown top-level objects are present if (Metadata.Custom.class.isAssignableFrom(categoryClass)) { - if (DataStreamMetadata.TYPE.equals(name)) { + if (DataStreamMetadata.TYPE.equals(name) || ComposableIndexTemplateMetadata.TYPE.equals(name) + || ComponentTemplateMetadata.TYPE.equals(name)) { // DataStreamMetadata is used inside Metadata class for validation purposes and building the indicesLookup, // therefore even ES node commands need to be able to parse it. return super.parseNamedObject(categoryClass, name, parser, context); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java index 2548431276020..ecfece366f7ad 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplate.java @@ -129,7 +129,7 @@ public String toString() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field(TEMPLATE.getPreferredName(), this.template); + builder.field(TEMPLATE.getPreferredName(), this.template, params); if (this.version != null) { builder.field(VERSION.getPreferredName(), this.version); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java index c655145bd7ff0..883372b9a4638 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java @@ -98,7 +98,7 @@ public static ComponentTemplateMetadata fromXContent(XContentParser parser) thro public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(COMPONENT_TEMPLATE.getPreferredName()); for (Map.Entry<String, ComponentTemplate> template : componentTemplates.entrySet()) { - builder.field(template.getKey(), template.getValue()); + builder.field(template.getKey(), template.getValue(), params); } builder.endObject(); return builder; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 80ab79c2b24ad..79581380d296b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
index 80ab79c2b24ad..79581380d296b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java
@@ -202,7 +202,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         builder.stringListField(INDEX_PATTERNS.getPreferredName(), this.indexPatterns);
         if (this.template != null) {
-            builder.field(TEMPLATE.getPreferredName(), this.template);
+            builder.field(TEMPLATE.getPreferredName(), this.template, params);
         }
         if (this.componentTemplates != null) {
             builder.stringListField(COMPOSED_OF.getPreferredName(), this.componentTemplates);
@@ -217,7 +217,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.field(METADATA.getPreferredName(), metadata);
         }
         if (this.dataStreamTemplate != null) {
-            builder.field(DATA_STREAM.getPreferredName(), dataStreamTemplate);
+            builder.field(DATA_STREAM.getPreferredName(), dataStreamTemplate, params);
         }
         if (this.allowAutoCreate != null) {
             builder.field(ALLOW_AUTO_CREATE.getPreferredName(), allowAutoCreate);
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java
index 67fd84779f6b7..1fe3f3ab8d8c2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java
@@ -99,7 +99,7 @@ public void writeTo(StreamOutput out) throws IOException {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(INDEX_TEMPLATE.getPreferredName());
         for (Map.Entry<String, ComposableIndexTemplate> template : indexTemplates.entrySet()) {
-            builder.field(template.getKey(), template.getValue());
+            builder.field(template.getKey(), template.getValue(), params);
         }
         builder.endObject();
         return builder;
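Note: the four toXContent changes above (ComponentTemplate, ComponentTemplateMetadata, ComposableIndexTemplate, ComposableIndexTemplateMetadata) are the same fix: the two-argument builder.field(name, value) serializes the nested object with ToXContent.EMPTY_PARAMS, silently dropping any flags the caller supplied, while the three-argument overload forwards them. A minimal sketch of the pattern; MyWrapper is a hypothetical type, not part of this patch:

    class MyWrapper implements ToXContent {
        private final ToXContent inner;

        MyWrapper(ToXContent inner) {
            this.inner = inner;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            // builder.field("inner", inner) would invoke inner.toXContent(builder, EMPTY_PARAMS);
            // forwarding params keeps the caller's serialization context intact:
            builder.field("inner", inner, params);
            return builder.endObject();
        }
    }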
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index 0fa3ebe26187e..9bfff34ccac09 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.core.Nullable;
+import org.elasticsearch.core.Tuple;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -174,26 +175,42 @@ public boolean isAllowCustomRouting() {
      * Performs a rollover on a {@code DataStream} instance and returns a new instance containing
      * the updated list of backing indices and incremented generation.
      *
-     * @param clusterMetadata Cluster metadata
-     * @param writeIndexUuid  UUID for the data stream's new write index
+     * @param writeIndex new write index
+     * @param generation new generation
      *
      * @return new {@code DataStream} instance with the rollover operation applied
      */
-    public DataStream rollover(Metadata clusterMetadata, String writeIndexUuid) {
-        if (replicated) {
-            throw new IllegalArgumentException("data stream [" + name + "] cannot be rolled over, " +
-                "because it is a replicated data stream");
-        }
+    public DataStream rollover(Index writeIndex, long generation) {
+        ensureNotReplicated();
         List<Index> backingIndices = new ArrayList<>(indices);
+        backingIndices.add(writeIndex);
+        return new DataStream(name, timeStampField, backingIndices, generation, metadata, hidden, false, system, allowCustomRouting);
+    }
+
+    /**
+     * Performs a dummy rollover on a {@code DataStream} instance and returns the tuple of the next write index name and next generation
+     * that this {@code DataStream} should roll over to using {@link #rollover(Index, long)}.
+     *
+     * @param clusterMetadata Cluster metadata
+     *
+     * @return a tuple of the next write index name and the next generation
+     */
+    public Tuple<String, Long> nextWriteIndexAndGeneration(Metadata clusterMetadata) {
+        ensureNotReplicated();
         String newWriteIndexName;
         long generation = this.generation;
         long currentTimeMillis = timeProvider.getAsLong();
         do {
             newWriteIndexName = DataStream.getDefaultBackingIndexName(getName(), ++generation, currentTimeMillis);
         } while (clusterMetadata.getIndicesLookup().containsKey(newWriteIndexName));
-        backingIndices.add(new Index(newWriteIndexName, writeIndexUuid));
-        return new DataStream(name, timeStampField, backingIndices, generation, metadata, hidden, replicated, system, allowCustomRouting);
+        return Tuple.tuple(newWriteIndexName, generation);
+    }
+
+    private void ensureNotReplicated() {
+        if (replicated) {
+            throw new IllegalArgumentException("data stream [" + name + "] cannot be rolled over, because it is a replicated data stream");
+        }
     }
 
     /**
@@ -214,7 +231,7 @@ public DataStream removeBackingIndex(Index index) {
                 index.getName(),
                 name
             ));
         }
-        if (generation == (backingIndexPosition + 1)) {
+        if (indices.size() == (backingIndexPosition + 1)) {
             throw new IllegalArgumentException(String.format(
                 Locale.ROOT,
                 "cannot remove backing index [%s] of data stream [%s] because it is the write index",
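Note: rollover is now split into a read-only planning step and an apply step, so the concrete write index can be created in between. A rough usage sketch, assuming the caller creates the index itself and that clusterState reflects it afterwards (variable names are illustrative):

    // Plan: pick the next backing-index name and generation without mutating anything.
    Tuple<String, Long> next = dataStream.nextWriteIndexAndGeneration(clusterState.metadata());
    String newIndexName = next.v1();
    long newGeneration = next.v2();

    // ... create the index `newIndexName` through the normal create-index path ...

    // Apply: add the now-existing write index and bump the generation.
    Index writeIndex = clusterState.metadata().index(newIndexName).getIndex();
    DataStream rolledOver = dataStream.rollover(writeIndex, newGeneration);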
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java
index 63e9d0e10ed0b..48b9cd9100360 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAction.java
@@ -9,73 +9,190 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
+import org.elasticsearch.xcontent.ToXContentObject;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
 
 /**
- * Operations on data streams
+ * Operations on data streams. Currently supports adding and removing backing indices.
  */
-public abstract class DataStreamAction {
-    private final String dataStream;
+public class DataStreamAction implements Writeable, ToXContentObject {
+
+    private static final ParseField DATA_STREAM = new ParseField("data_stream");
+    private static final ParseField INDEX = new ParseField("index");
+
+    private static final ParseField ADD_BACKING_INDEX = new ParseField("add_backing_index");
+    private static final ParseField REMOVE_BACKING_INDEX = new ParseField("remove_backing_index");
+
+    public enum Type {
+        ADD_BACKING_INDEX((byte) 0, DataStreamAction.ADD_BACKING_INDEX),
+        REMOVE_BACKING_INDEX((byte) 1, DataStreamAction.REMOVE_BACKING_INDEX);
+
+        private final byte value;
+        private final String fieldName;
+
+        Type(byte value, ParseField field) {
+            this.value = value;
+            this.fieldName = field.getPreferredName();
+        }
+
+        public byte value() {
+            return value;
+        }
+
+        public static Type fromValue(byte value) {
+            switch (value) {
+                case 0: return ADD_BACKING_INDEX;
+                case 1: return REMOVE_BACKING_INDEX;
+                default: throw new IllegalArgumentException("no data stream action type for [" + value + "]");
+            }
+        }
+    }
+
+    private final Type type;
+    private String dataStream;
+    private String index;
 
     public static DataStreamAction addBackingIndex(String dataStream, String index) {
-        return new DataStreamAction.AddBackingIndex(dataStream, index);
+        return new DataStreamAction(Type.ADD_BACKING_INDEX, dataStream, index);
     }
 
     public static DataStreamAction removeBackingIndex(String dataStream, String index) {
-        return new DataStreamAction.RemoveBackingIndex(dataStream, index);
+        return new DataStreamAction(Type.REMOVE_BACKING_INDEX, dataStream, index);
    }
 
-    private DataStreamAction(String dataStream) {
+    public DataStreamAction(StreamInput in) throws IOException {
+        this.type = Type.fromValue(in.readByte());
+        this.dataStream = in.readString();
+        this.index = in.readString();
+    }
+
+    private DataStreamAction(Type type, String dataStream, String index) {
         if (false == Strings.hasText(dataStream)) {
             throw new IllegalArgumentException("[data_stream] is required");
         }
+        if (false == Strings.hasText(index)) {
+            throw new IllegalArgumentException("[index] is required");
+        }
+        this.type = Objects.requireNonNull(type, "[type] must not be null");
         this.dataStream = dataStream;
+        this.index = index;
+    }
+
+    DataStreamAction(Type type) {
+        this.type = type;
     }
 
-    /**
-     * Data stream on which the operation should act
-     */
     public String getDataStream() {
         return dataStream;
     }
 
-    public static class AddBackingIndex extends DataStreamAction {
-
-        private final String index;
+    public void setDataStream(String dataStream) {
+        this.dataStream = dataStream;
+    }
 
-        private AddBackingIndex(String dataStream, String index) {
-            super(dataStream);
+    public String getIndex() {
+        return index;
+    }
 
-            if (false == Strings.hasText(index)) {
-                throw new IllegalArgumentException("[index] is required");
-            }
+    public void setIndex(String index) {
+        this.index = index;
+    }
 
-            this.index = index;
-        }
+    public Type getType() {
+        return type;
+    }
 
-        public String getIndex() {
-            return index;
-        }
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.startObject(type.fieldName);
+        builder.field(DATA_STREAM.getPreferredName(), dataStream);
+        builder.field(INDEX.getPreferredName(), index);
+        builder.endObject();
+        builder.endObject();
+        return builder;
+    }
 
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeByte(type.value());
+        out.writeString(dataStream);
+        out.writeString(index);
     }
 
-    public static class RemoveBackingIndex extends DataStreamAction {
+    public static DataStreamAction fromXContent(XContentParser parser) throws IOException {
+        return PARSER.apply(parser, null);
+    }
 
-        private final String index;
+    private static final ObjectParser<DataStreamAction, Void> ADD_BACKING_INDEX_PARSER = parser(
+        ADD_BACKING_INDEX.getPreferredName(),
+        () -> new DataStreamAction(Type.ADD_BACKING_INDEX)
+    );
+    private static final ObjectParser<DataStreamAction, Void> REMOVE_BACKING_INDEX_PARSER = parser(
+        REMOVE_BACKING_INDEX.getPreferredName(),
+        () -> new DataStreamAction(Type.REMOVE_BACKING_INDEX)
+    );
+    static {
+        ADD_BACKING_INDEX_PARSER.declareField(DataStreamAction::setDataStream, XContentParser::text, DATA_STREAM,
+            ObjectParser.ValueType.STRING);
+        ADD_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
+        REMOVE_BACKING_INDEX_PARSER.declareField(DataStreamAction::setDataStream, XContentParser::text, DATA_STREAM,
+            ObjectParser.ValueType.STRING);
+        REMOVE_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
+    }
 
-        private RemoveBackingIndex(String dataStream, String index) {
-            super(dataStream);
+    private static ObjectParser<DataStreamAction, Void> parser(String name, Supplier<DataStreamAction> supplier) {
+        ObjectParser<DataStreamAction, Void> parser = new ObjectParser<>(name, supplier);
+        return parser;
+    }
 
-            if (false == Strings.hasText(index)) {
-                throw new IllegalArgumentException("[index] is required");
+    public static final ConstructingObjectParser<DataStreamAction, Void> PARSER = new ConstructingObjectParser<>(
+        "data_stream_action", a -> {
+            // Take the first action and error if there is more than one action
+            DataStreamAction action = null;
+            for (Object o : a) {
+                if (o != null) {
+                    if (action == null) {
+                        action = (DataStreamAction) o;
+                    } else {
+                        throw new IllegalArgumentException("too many data stream operations declared on operation entry");
+                    }
+                }
             }
-
-            this.index = index;
+            return action;
+        });
+    static {
+        PARSER.declareObject(optionalConstructorArg(), ADD_BACKING_INDEX_PARSER, ADD_BACKING_INDEX);
+        PARSER.declareObject(optionalConstructorArg(), REMOVE_BACKING_INDEX_PARSER, REMOVE_BACKING_INDEX);
+    }
 
-        public String getIndex() {
-            return index;
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null || obj.getClass() != getClass()) {
+            return false;
         }
+        DataStreamAction other = (DataStreamAction) obj;
+        return Objects.equals(type, other.type)
+            && Objects.equals(dataStream, other.dataStream)
+            && Objects.equals(index, other.index);
+    }
 
+    @Override
+    public int hashCode() {
+        return Objects.hash(type, dataStream, index);
     }
 }
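Note: with DataStreamAction now implementing ToXContentObject, each action serializes as a single-key object named after its type, and PARSER accepts exactly one of add_backing_index or remove_backing_index per entry (the "too many data stream operations" branch above). A small round-trip sketch; the data stream and index names are made up:

    DataStreamAction add = DataStreamAction.addBackingIndex("logs-app", "logs-app-legacy");
    XContentBuilder builder = XContentFactory.jsonBuilder();
    add.toXContent(builder, ToXContent.EMPTY_PARAMS);
    // Renders as: {"add_backing_index":{"data_stream":"logs-app","index":"logs-app-legacy"}}
    String json = Strings.toString(builder);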
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
index cd199057e1d96..79f458a26411e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstraction.java
@@ -179,6 +179,23 @@ public boolean isSystem() {
         public List<String> getAliases() {
             return aliases;
         }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            ConcreteIndex that = (ConcreteIndex) o;
+            return isHidden == that.isHidden &&
+                isSystem == that.isSystem &&
+                concreteIndexName.equals(that.concreteIndexName) &&
+                Objects.equals(aliases, that.aliases) &&
+                Objects.equals(dataStream, that.dataStream);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(concreteIndexName, isHidden, isSystem, aliases, dataStream);
+        }
     }
 
     /**
@@ -322,6 +339,24 @@ private void validateAliasProperties(List<IndexMetadata> referenceIndexMetadatas) {
         private boolean isNonEmpty(List<IndexMetadata> idxMetas) {
             return (Objects.isNull(idxMetas) || idxMetas.isEmpty()) == false;
         }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            Alias alias = (Alias) o;
+            return isHidden == alias.isHidden &&
+                isSystem == alias.isSystem &&
+                dataStreamAlias == alias.dataStreamAlias &&
+                aliasName.equals(alias.aliasName) &&
+                referenceIndexMetadatas.equals(alias.referenceIndexMetadatas) &&
+                Objects.equals(writeIndex, alias.writeIndex);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(aliasName, referenceIndexMetadatas, writeIndex, isHidden, isSystem, dataStreamAlias);
+        }
     }
 
     class DataStream implements IndexAbstraction {
@@ -383,6 +418,20 @@ public List<String> getAliases() {
         public org.elasticsearch.cluster.metadata.DataStream getDataStream() {
             return dataStream;
         }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            DataStream that = (DataStream) o;
+            return dataStream.equals(that.dataStream) &&
+                Objects.equals(referencedByDataStreamAliases, that.referencedByDataStreamAliases);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(dataStream, referencedByDataStreamAliases);
+        }
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java
index add6ff97597fc..b7781680aca92 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexAbstractionResolver.java
@@ -103,7 +103,7 @@ public List<String> resolveIndexAbstractions(Iterable<String> indices, IndicesOptions indicesOptions,
             } else if (dateMathName.equals(indexAbstraction)) {
                 if (minus) {
                     finalIndices.remove(indexAbstraction);
-                } else {
+                } else if (indicesOptions.ignoreUnavailable() == false || availableIndexAbstractions.contains(indexAbstraction)) {
                     finalIndices.add(indexAbstraction);
                 }
             }
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
index e3d6a00dfa953..e0147354e98cb 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java
@@ -37,12 +37,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.xcontent.ToXContent;
-import org.elasticsearch.xcontent.ToXContentFragment;
-import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
-import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.gateway.MetadataStateFormat;
@@ -52,6 +47,11 @@
 import org.elasticsearch.index.shard.IndexLongFieldRange;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.ToXContentFragment;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.time.Instant;
@@ -149,7 +149,7 @@ static Setting<Integer> buildNumberOfShardsSetting() {
     public static final String SETTING_ROUTING_PARTITION_SIZE = "index.routing_partition_size";
     public static final Setting<Integer> INDEX_ROUTING_PARTITION_SIZE_SETTING =
-        Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope);
+        Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.Final, Property.IndexScope);
 
     @SuppressWarnings("Convert2Diamond") // since some IntelliJs mysteriously report an error if an <Integer> is replaced with <> here:
     public static final Setting<Integer> INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING = Setting.intSetting(
@@ -677,7 +677,7 @@
     public IndexLongFieldRange getTimestampRange() {
         return timestampRange;
     }
-    
+
     @Override
     public boolean equals(Object o) {
@@ -1638,6 +1638,75 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOException {
         }
         return builder.build();
     }
+
+    /**
+     * Used to load legacy metadata from ES versions that are no longer index-compatible.
+     * Returns information on a best-effort basis.
+     * Throws an exception if the metadata is index-compatible with the current version (in that case,
+     * {@link #fromXContent} should be used to load the content).
+     */
+    public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOException {
+        if (parser.currentToken() == null) { // fresh parser? move to the first token
+            parser.nextToken();
+        }
+        if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token
+            parser.nextToken();
+        }
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
+        Builder builder = new Builder(parser.currentName());
+
+        String currentFieldName = null;
+        XContentParser.Token token = parser.nextToken();
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.START_OBJECT) {
+                if ("settings".equals(currentFieldName)) {
+                    Settings settings = Settings.fromXContent(parser);
+                    if (SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(Version.CURRENT.minimumIndexCompatibilityVersion())) {
+                        throw new IllegalStateException("this method should only be used to parse older index metadata versions " +
+                            "but got " + SETTING_INDEX_VERSION_CREATED.get(settings));
+                    }
+                    builder.settings(settings);
+                } else if ("mappings".equals(currentFieldName)) {
+                    // don't try to parse these for now
+                    parser.skipChildren();
+                } else {
+                    // assume it's custom index metadata
+                    parser.skipChildren();
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                if ("mappings".equals(currentFieldName)) {
+                    // don't try to parse these for now
+                    parser.skipChildren();
+                } else {
+                    parser.skipChildren();
+                }
+            } else if (token.isValue()) {
+                if ("state".equals(currentFieldName)) {
+                    builder.state(State.fromString(parser.text()));
+                } else if ("version".equals(currentFieldName)) {
+                    builder.version(parser.longValue());
+                } else if ("mapping_version".equals(currentFieldName)) {
+                    builder.mappingVersion(parser.longValue());
+                } else if ("settings_version".equals(currentFieldName)) {
+                    builder.settingsVersion(parser.longValue());
+                } else if ("routing_num_shards".equals(currentFieldName)) {
+                    builder.setRoutingNumShards(parser.intValue());
+                } else {
+                    // unknown, ignore
+                }
+            } else {
+                XContentParserUtils.throwUnknownToken(token, parser.getTokenLocation());
+            }
+        }
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser);
+
+        IndexMetadata indexMetadata = builder.build();
+        assert indexMetadata.getCreationVersion().before(Version.CURRENT.minimumIndexCompatibilityVersion());
+        return indexMetadata;
+    }
 }
 
 /**
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
index b5d7fcddd91a3..589739d80dafd 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java
@@ -1038,15 +1038,18 @@ public static class Builder {
         private final ImmutableOpenMap.Builder<String, IndexTemplateMetadata> templates;
         private final ImmutableOpenMap.Builder<String, Custom> customs;
 
+        private SortedMap<String, IndexAbstraction> previousIndicesLookup;
+
         public Builder() {
             clusterUUID = UNKNOWN_CLUSTER_UUID;
             indices = ImmutableOpenMap.builder();
             templates = ImmutableOpenMap.builder();
             customs = ImmutableOpenMap.builder();
             indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize
+            previousIndicesLookup = null;
         }
 
-        public Builder(Metadata metadata) {
+        Builder(Metadata metadata) {
             this.clusterUUID = metadata.clusterUUID;
             this.clusterUUIDCommitted = metadata.clusterUUIDCommitted;
             this.coordinationMetadata = metadata.coordinationMetadata;
@@ -1057,13 +1060,17 @@ public Builder(Metadata metadata) {
             this.indices = ImmutableOpenMap.builder(metadata.indices);
             this.templates = ImmutableOpenMap.builder(metadata.templates);
             this.customs = ImmutableOpenMap.builder(metadata.customs);
+            previousIndicesLookup = metadata.getIndicesLookup();
         }
 
         public Builder put(IndexMetadata.Builder indexMetadataBuilder) {
             // we know its a new one, increment the version and store
             indexMetadataBuilder.version(indexMetadataBuilder.version() + 1);
             IndexMetadata indexMetadata = indexMetadataBuilder.build();
-            indices.put(indexMetadata.getIndex().getName(), indexMetadata);
+            IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata);
+            if (unsetPreviousIndicesLookup(previous, indexMetadata)) {
+                previousIndicesLookup = null;
+            }
             return this;
         }
 
@@ -1075,10 +1082,37 @@ public Builder put(IndexMetadata indexMetadata, boolean incrementVersion) {
             if (incrementVersion) {
                 indexMetadata = IndexMetadata.builder(indexMetadata).version(indexMetadata.getVersion() + 1).build();
             }
-            indices.put(indexMetadata.getIndex().getName(), indexMetadata);
+            IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata);
+            if (unsetPreviousIndicesLookup(previous, indexMetadata)) {
+                previousIndicesLookup = null;
+            }
             return this;
         }
 
+        boolean unsetPreviousIndicesLookup(IndexMetadata previous, IndexMetadata current) {
+            if (previous == null) {
+                return true;
+            }
+
+            if (previous.getAliases().equals(current.getAliases()) == false) {
+                return true;
+            }
+
+            if (previous.isHidden() != current.isHidden()) {
+                return true;
+            }
+
+            if (previous.isSystem() != current.isSystem()) {
+                return true;
+            }
+
+            if (previous.getState() != current.getState()) {
+                return true;
+            }
+
+            return false;
+        }
+
         public IndexMetadata get(String index) {
             return indices.get(index);
         }
@@ -1097,16 +1131,22 @@ public IndexMetadata getSafe(Index index) {
         }
 
         public Builder remove(String index) {
+            previousIndicesLookup = null;
+
             indices.remove(index);
             return this;
         }
 
         public Builder removeAllIndices() {
+            previousIndicesLookup = null;
+
             indices.clear();
             return this;
         }
 
         public Builder indices(ImmutableOpenMap<String, IndexMetadata> indices) {
+            previousIndicesLookup = null;
+
             this.indices.putAll(indices);
             return this;
         }
@@ -1187,6 +1227,8 @@ public Builder removeIndexTemplate(String name) {
         }
 
         public DataStream dataStream(String dataStreamName) {
+            previousIndicesLookup = null;
+
             DataStreamMetadata dataStreamMetadata = (DataStreamMetadata) customs.get(DataStreamMetadata.TYPE);
             if (dataStreamMetadata != null) {
                 return dataStreamMetadata.dataStreams().get(dataStreamName);
@@ -1196,11 +1238,15 @@ public DataStream dataStream(String dataStreamName) {
         }
 
         public Builder dataStreams(Map<String, DataStream> dataStreams, Map<String, DataStreamAlias> dataStreamAliases) {
+            previousIndicesLookup = null;
+
             this.customs.put(DataStreamMetadata.TYPE, new DataStreamMetadata(dataStreams, dataStreamAliases));
             return this;
         }
 
         public Builder put(DataStream dataStream) {
+            previousIndicesLookup = null;
+
             Objects.requireNonNull(dataStream, "it is invalid to add a null data stream");
             Map<String, DataStream> existingDataStreams =
                 Optional.ofNullable((DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE))
@@ -1217,6 +1263,8 @@ public Builder put(DataStream dataStream) {
         }
 
         public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) {
+            previousIndicesLookup = null;
+
             Map<String, DataStream> existingDataStream =
                 Optional.ofNullable((DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE))
                     .map(dsmd -> new HashMap<>(dsmd.dataStreams()))
@@ -1255,6 +1303,8 @@ public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) {
         }
 
         public Builder removeDataStream(String name) {
+            previousIndicesLookup = null;
+
             Map<String, DataStream> existingDataStreams =
                 Optional.ofNullable((DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE))
                     .map(dsmd -> new HashMap<>(dsmd.dataStreams()))
@@ -1291,6 +1341,8 @@ public Builder removeDataStream(String name) {
         }
 
         public boolean removeDataStreamAlias(String aliasName, String dataStreamName, boolean mustExist) {
+            previousIndicesLookup = null;
+
             Map<String, DataStreamAlias> dataStreamAliases =
                 Optional.ofNullable((DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE))
                     .map(dsmd -> new HashMap<>(dsmd.getDataStreamAliases()))
@@ -1538,10 +1590,15 @@ public Metadata build(boolean builtIndicesLookupEagerly) {
             ImmutableOpenMap<String, IndexMetadata> indices = this.indices.build();
 
             SortedMap<String, IndexAbstraction> indicesLookup;
-            if (builtIndicesLookupEagerly) {
-                indicesLookup = Collections.unmodifiableSortedMap(buildIndicesLookup(dataStreamMetadata, indices));
+            if (previousIndicesLookup != null) {
+                assert previousIndicesLookup.equals(buildIndicesLookup(dataStreamMetadata, indices));
+                indicesLookup = previousIndicesLookup;
             } else {
-                indicesLookup = null;
+                if (builtIndicesLookupEagerly) {
+                    indicesLookup = Collections.unmodifiableSortedMap(buildIndicesLookup(dataStreamMetadata, indices));
+                } else {
+                    indicesLookup = null;
+                }
             }
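Note: the previousIndicesLookup changes amount to a cache. Copying a Metadata into a builder carries the old (expensive to build) indices lookup along; every mutation that could change the lookup drops it, while a put() of an index whose aliases/hidden/system/state are unchanged keeps it, so build() can reuse the map. A hedged sketch of the effect, assuming existingIndexMetadata is already present and unchanged apart from its version:

    Metadata updated = Metadata.builder(currentMetadata)       // carries previousIndicesLookup
        .put(IndexMetadata.builder(existingIndexMetadata))     // version bump only: cache survives
        .build();
    // Same object, not merely an equal one:
    assert updated.getIndicesLookup() == currentMetadata.getIndicesLookup();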
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
index 5dff916955b04..9341589705209 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java
@@ -9,10 +9,10 @@
 package org.elasticsearch.cluster.metadata;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.datastreams.ModifyDataStreamsAction;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
@@ -20,8 +20,6 @@
 import org.elasticsearch.indices.IndicesService;
 
 import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
 import java.util.function.Function;
 
 /**
@@ -37,9 +35,9 @@ public MetadataDataStreamsService(ClusterService clusterService, IndicesService indicesService) {
         this.indicesService = indicesService;
     }
 
-    public void updateBackingIndices(final ModifyDataStreamRequest request,
-                                     final ActionListener<AcknowledgedResponse> listener) {
-        if (request.actions().size() == 0) {
+    public void modifyDataStream(final ModifyDataStreamsAction.Request request,
+                                 final ActionListener<AcknowledgedResponse> listener) {
+        if (request.getActions().size() == 0) {
             listener.onResponse(AcknowledgedResponse.TRUE);
         } else {
             clusterService.submitStateUpdateTask("update-backing-indices",
@@ -48,7 +46,7 @@ public void updateBackingIndices(final ModifyDataStreamRequest request,
                 public ClusterState execute(ClusterState currentState) {
                     return modifyDataStream(
                         currentState,
-                        request.actions(),
+                        request.getActions(),
                         indexMetadata -> {
                             try {
                                 return indicesService.createIndexMapperService(indexMetadata);
@@ -66,7 +64,7 @@ public ClusterState execute(ClusterState currentState) {
      * Computes the resulting cluster state after applying all requested data stream modifications in order.
      *
      * @param currentState current cluster state
-     * @param actions ordered list of modifications to perform
+     * @param actions      ordered list of modifications to perform
      * @return resulting cluster state after all modifications have been performed
      */
     static ClusterState modifyDataStream(
@@ -78,20 +76,20 @@ static ClusterState modifyDataStream(
         for (var action : actions) {
             Metadata.Builder builder = Metadata.builder(updatedMetadata);
 
-            if (action instanceof DataStreamAction.AddBackingIndex) {
+            if (action.getType() == DataStreamAction.Type.ADD_BACKING_INDEX) {
                 addBackingIndex(
                     updatedMetadata,
                     builder,
                     mapperSupplier,
                     action.getDataStream(),
-                    ((DataStreamAction.AddBackingIndex) action).getIndex()
+                    action.getIndex()
                 );
-            } else if (action instanceof DataStreamAction.RemoveBackingIndex) {
+            } else if (action.getType() == DataStreamAction.Type.REMOVE_BACKING_INDEX) {
                 removeBackingIndex(
                     updatedMetadata,
                     builder,
                     action.getDataStream(),
-                    ((DataStreamAction.RemoveBackingIndex) action).getIndex()
+                    action.getIndex()
                 );
             } else {
                 throw new IllegalStateException("unsupported data stream action type [" + action.getClass().getName() + "]");
@@ -155,17 +153,4 @@ private static IndexAbstraction validateIndex(Metadata metadata, String indexName) {
         return index;
     }
 
-    public static final class ModifyDataStreamRequest extends ClusterStateUpdateRequest<ModifyDataStreamRequest> {
-
-        private final List<DataStreamAction> actions;
-
-        public ModifyDataStreamRequest(List<DataStreamAction> actions) {
-            this.actions = Collections.unmodifiableList(actions);
-        }
-
-        public List<DataStreamAction> actions() {
-            return actions;
-        }
-    }
-
 }
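Note: the request type now lives in ModifyDataStreamsAction (see the new import above); its definition is outside this hunk, so the constructor used below is an assumption based on the getActions() accessor. A caller-side sketch with illustrative names:

    ModifyDataStreamsAction.Request request = new ModifyDataStreamsAction.Request(
        List.of(
            DataStreamAction.addBackingIndex("logs-app", "logs-app-legacy"),
            DataStreamAction.removeBackingIndex("logs-app", ".ds-logs-app-000001")
        )
    );
    metadataDataStreamsService.modifyDataStream(request, ActionListener.wrap(
        acked -> logger.info("modified: {}", acked.isAcknowledged()),
        e -> logger.warn("data stream modification failed", e)
    ));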
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
index 56e41f89aa905..b39aac641c2a3 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -24,6 +24,7 @@
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
@@ -41,11 +42,9 @@
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentFactory;
-import org.elasticsearch.xcontent.XContentParseException;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
@@ -498,7 +497,7 @@ public ClusterState addIndexTemplateV2(final ClusterState currentState, final boolean create,
                 .collect(Collectors.joining(",")),
             name);
         logger.warn(warning);
-        HeaderWarning.addWarning(warning);
+        HeaderWarning.addWarning(DeprecationLogger.CRITICAL, warning);
     }
 
     ComposableIndexTemplate finalIndexTemplate = template;
@@ -828,7 +827,7 @@ static ClusterState innerPutTemplate(final ClusterState currentState, PutRequest request,
                 .collect(Collectors.joining(",")),
             request.name);
         logger.warn(warning);
-        HeaderWarning.addWarning(warning);
+        HeaderWarning.addWarning(DeprecationLogger.CRITICAL, warning);
     } else {
         // Otherwise, this is a hard error, the user should use V2 index templates instead
         String error = String.format(Locale.ROOT, "legacy template [%s] has index patterns %s matching patterns" +
@@ -1230,16 +1229,6 @@ private static void validateCompositeTemplate(final ClusterState state,
         String indexName = DataStream.BACKING_INDEX_PREFIX + temporaryIndexName;
         // Parse mappings to ensure they are valid after being composed
 
-        if (template.getDataStreamTemplate() != null) {
-            // If there is no _data_stream meta field mapper and a data stream should be created then
-            // fail as if the data_stream field can't be parsed:
-            if (tempIndexService.mapperService().isMetadataField(DataStreamTimestampFieldMapper.NAME) == false) {
-                // Fail like a parsing expection, since we will be moving data_stream template out of server module and
-                // then we would fail with the same error message, like we do here.
-                throw new XContentParseException("[index_template] unknown field [data_stream]");
-            }
-        }
-
         List<CompressedXContent> mappings = collectMappings(stateWithIndex, templateName, indexName, xContentRegistry);
         try {
             MapperService mapperService = tempIndexService.mapperService();
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java
index e25808bc1d24f..9800f732e6d3a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java
@@ -22,7 +22,6 @@
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Setting;
@@ -57,7 +56,6 @@ public class MetadataUpdateSettingsService {
     private final ShardLimitValidator shardLimitValidator;
     private final ThreadPool threadPool;
 
-    @Inject
     public MetadataUpdateSettingsService(ClusterService clusterService, AllocationService allocationService,
                                          IndexScopedSettings indexScopedSettings, IndicesService indicesService,
                                          ShardLimitValidator shardLimitValidator, ThreadPool threadPool) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
index ffb23b6835b7b..8e7833aae6ee1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.cluster.AbstractDiffable;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.core.Nullable;
+import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -27,6 +28,7 @@
 import org.elasticsearch.index.mapper.MapperService;
 
 import java.io.IOException;
+import java.util.Base64;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
@@ -47,8 +49,18 @@ public class Template extends AbstractDiffable